Dataset schema:

| Column | Dtype | Value range |
|---|---|---|
| `code` | string | lengths 86 to 54.5k |
| `code_codestyle` | int64 | 0 to 371 |
| `style_context` | string | lengths 87 to 49.2k |
| `style_context_codestyle` | int64 | 0 to 349 |
| `label` | int64 | 0 to 1 |
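For orientation, here is a minimal sketch of how a dataset with this schema might be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier and split name are placeholders (neither is given above), and the inline comments only restate the schema; the semantics of `label` are not documented on this page.

```python
# Minimal sketch: load a dataset with the schema above and inspect one row.
# "user/code-style-pairs" is a hypothetical placeholder identifier; the actual
# dataset name is not stated on this page.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]

print(row["code"][:200])               # source code as one string (86 to 54.5k chars)
print(row["code_codestyle"])           # integer style id for `code` (0 to 371)
print(row["style_context"][:200])      # reference source as one string (87 to 49.2k chars)
print(row["style_context_codestyle"])  # integer style id for `style_context` (0 to 349)
print(row["label"])                    # binary label (0 or 1); semantics not documented here
```

The sample rows below follow this schema.

`code` (row 1):

```python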
'''simple docstring''' import torch from torch import nn class _A ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 , __UpperCAmelCase=False ) -> List[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = n_token __UpperCAmelCase : Optional[Any] = d_embed __UpperCAmelCase : int = d_proj __UpperCAmelCase : Tuple = cutoffs + [n_token] __UpperCAmelCase : List[Any] = [0] + self.cutoffs __UpperCAmelCase : int = div_val __UpperCAmelCase : Optional[int] = self.cutoffs[0] __UpperCAmelCase : Union[str, Any] = len(self.cutoffs ) - 1 __UpperCAmelCase : Tuple = self.shortlist_size + self.n_clusters if self.n_clusters > 0: __UpperCAmelCase : Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) __UpperCAmelCase : Any = nn.Parameter(torch.zeros(self.n_clusters ) ) __UpperCAmelCase : List[str] = nn.ModuleList() __UpperCAmelCase : Tuple = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCAmelCase , __UpperCAmelCase ) ) ) else: self.out_projs.append(__UpperCAmelCase ) self.out_layers.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) ) else: for i in range(len(self.cutoffs ) ): __UpperCAmelCase , __UpperCAmelCase : str = self.cutoff_ends[i], self.cutoff_ends[i + 1] __UpperCAmelCase : List[Any] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCAmelCase , __UpperCAmelCase ) ) ) self.out_layers.append(nn.Linear(__UpperCAmelCase , r_idx - l_idx ) ) __UpperCAmelCase : Union[str, Any] = keep_order def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' if proj is None: __UpperCAmelCase : Any = nn.functional.linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: __UpperCAmelCase : str = nn.functional.linear(__UpperCAmelCase , proj.t().contiguous() ) __UpperCAmelCase : Tuple = nn.functional.linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int: '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n __UpperCAmelCase : List[str] = hidden[..., :-1, :].contiguous() __UpperCAmelCase : int = labels[..., 1:].contiguous() __UpperCAmelCase : List[str] = hidden.view(-1 , hidden.size(-1 ) ) __UpperCAmelCase : Union[str, Any] = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" ) else: __UpperCAmelCase : List[Any] = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: __UpperCAmelCase : Union[str, Any] = self._compute_logit(__UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: __UpperCAmelCase : Any = labels != -100 __UpperCAmelCase : Optional[int] = torch.zeros_like(__UpperCAmelCase , dtype=hidden.dtype , device=hidden.device ) __UpperCAmelCase : str = ( -nn.functional.log_softmax(__UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: __UpperCAmelCase : Optional[int] = nn.functional.log_softmax(__UpperCAmelCase , dim=-1 ) else: # construct weights and biases 
__UpperCAmelCase , __UpperCAmelCase : Tuple = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __UpperCAmelCase , __UpperCAmelCase : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1] __UpperCAmelCase : Tuple = self.out_layers[0].weight[l_idx:r_idx] __UpperCAmelCase : Optional[int] = self.out_layers[0].bias[l_idx:r_idx] else: __UpperCAmelCase : Tuple = self.out_layers[i].weight __UpperCAmelCase : int = self.out_layers[i].bias if i == 0: __UpperCAmelCase : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __UpperCAmelCase : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__UpperCAmelCase ) biases.append(__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = weights[0], biases[0], self.out_projs[0] __UpperCAmelCase : List[str] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : str = nn.functional.log_softmax(__UpperCAmelCase , dim=1 ) if labels is None: __UpperCAmelCase : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: __UpperCAmelCase : List[str] = torch.zeros_like(__UpperCAmelCase , dtype=hidden.dtype , device=hidden.device ) __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : int = [0] + self.cutoffs for i in range(len(__UpperCAmelCase ) - 1 ): __UpperCAmelCase , __UpperCAmelCase : int = cutoff_values[i], cutoff_values[i + 1] if labels is not None: __UpperCAmelCase : Any = (labels >= l_idx) & (labels < r_idx) __UpperCAmelCase : List[str] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue __UpperCAmelCase : List[Any] = labels.index_select(0 , __UpperCAmelCase ) - l_idx __UpperCAmelCase : str = head_logprob.index_select(0 , __UpperCAmelCase ) __UpperCAmelCase : Dict = hidden.index_select(0 , __UpperCAmelCase ) else: __UpperCAmelCase : List[Any] = hidden if i == 0: if labels is not None: __UpperCAmelCase : int = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: __UpperCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]] else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = weights[i], biases[i], self.out_projs[i] __UpperCAmelCase : Optional[Any] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = nn.functional.log_softmax(__UpperCAmelCase , dim=1 ) __UpperCAmelCase : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: __UpperCAmelCase : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: __UpperCAmelCase : List[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i __UpperCAmelCase : int = logprob_i if labels is not None: if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order: out.index_copy_(0 , __UpperCAmelCase , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if self.n_clusters == 0: __UpperCAmelCase : Optional[int] = self._compute_logit(__UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(__UpperCAmelCase , dim=-1 ) else: # construct weights and biases __UpperCAmelCase , __UpperCAmelCase : List[str] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __UpperCAmelCase , __UpperCAmelCase : Optional[int] = 
self.cutoff_ends[i], self.cutoff_ends[i + 1] __UpperCAmelCase : int = self.out_layers[0].weight[l_idx:r_idx] __UpperCAmelCase : List[str] = self.out_layers[0].bias[l_idx:r_idx] else: __UpperCAmelCase : Optional[Any] = self.out_layers[i].weight __UpperCAmelCase : Union[str, Any] = self.out_layers[i].bias if i == 0: __UpperCAmelCase : int = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __UpperCAmelCase : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__UpperCAmelCase ) biases.append(__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0] __UpperCAmelCase : Optional[int] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) __UpperCAmelCase : List[Any] = nn.functional.log_softmax(__UpperCAmelCase , dim=1 ) __UpperCAmelCase : Any = [0] + self.cutoffs for i in range(len(__UpperCAmelCase ) - 1 ): __UpperCAmelCase , __UpperCAmelCase : Dict = cutoff_values[i], cutoff_values[i + 1] if i == 0: __UpperCAmelCase : Optional[int] = head_logprob[:, : self.cutoffs[0]] else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = weights[i], biases[i], self.out_projs[i] __UpperCAmelCase : Union[str, Any] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[str] = nn.functional.log_softmax(__UpperCAmelCase , dim=1 ) __UpperCAmelCase : str = head_logprob[:, -i] + tail_logprob_i __UpperCAmelCase : List[str] = logprob_i return out
```

`code_codestyle` (row 1): 16

`style_context` (row 1):

```python
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
```

`style_context_codestyle` (row 1): 16
`label` (row 1): 1

`code` (row 2):

```python
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar _UpperCamelCase = TypeVar('''T''') _UpperCamelCase = TypeVar('''U''') class _A ( Generic[T, U] ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = key __UpperCAmelCase : List[str] = val __UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None __UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: '''simple docstring''' return ( f'Node: key: {self.key}, val: {self.val}, ' f'has next: {bool(self.next )}, has prev: {bool(self.prev )}' ) class _A ( Generic[T, U] ): def __init__( self ) -> None: '''simple docstring''' __UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.rear, self.head def __repr__( self ) -> str: '''simple docstring''' __UpperCAmelCase : List[Any] = ["""DoubleLinkedList"""] __UpperCAmelCase : int = self.head while node.next is not None: rep.append(str(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = node.next rep.append(str(self.rear ) ) return ",\n ".join(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None __UpperCAmelCase : str = node __UpperCAmelCase : int = previous __UpperCAmelCase : Any = node __UpperCAmelCase : Tuple = self.rear def __A ( self , __UpperCAmelCase ) -> DoubleLinkedListNode[T, U] | None: '''simple docstring''' if node.prev is None or node.next is None: return None __UpperCAmelCase : str = node.next __UpperCAmelCase : Optional[int] = node.prev __UpperCAmelCase : Tuple = None __UpperCAmelCase : List[str] = None return node class _A ( Generic[T, U] ): _SCREAMING_SNAKE_CASE : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : DoubleLinkedList[T, U] = DoubleLinkedList() __UpperCAmelCase : Optional[int] = capacity __UpperCAmelCase : str = 0 __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: '''simple docstring''' return ( f'CacheInfo(hits={self.hits}, misses={self.miss}, ' f'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , __UpperCAmelCase ) -> bool: '''simple docstring''' return key in self.cache def __A ( self , __UpperCAmelCase ) -> U | None: '''simple docstring''' # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 __UpperCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key] __UpperCAmelCase : Optional[int] = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(__UpperCAmelCase ) return node.val self.miss += 1 return None def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity __UpperCAmelCase : Optional[int] = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to 
type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(__UpperCAmelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 __UpperCAmelCase : Optional[int] = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value __UpperCAmelCase : Dict = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list __UpperCAmelCase : Optional[int] = value self.list.add(__UpperCAmelCase ) @classmethod def __A ( cls , __UpperCAmelCase = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: '''simple docstring''' def cache_decorator_inner(__UpperCAmelCase ) -> Callable[..., U]: def cache_decorator_wrapper(*__UpperCAmelCase ) -> U: if func not in cls.decorator_function_to_instance_map: __UpperCAmelCase : Union[str, Any] = LRUCache(__UpperCAmelCase ) __UpperCAmelCase : int = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: __UpperCAmelCase : Optional[int] = func(*__UpperCAmelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , __UpperCAmelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(__UpperCAmelCase , """cache_info""" , __UpperCAmelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
```

`code_codestyle` (row 2): 16

`style_context` (row 2):

```python
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": __UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , sep="""\t""" , header=lowerCAmelCase__ ) for answer_list in data[1]: __UpperCAmelCase : Optional[int] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : str = [[reference] for reference in references] __UpperCAmelCase : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase : int = 100.0 * em / total __UpperCAmelCase : Dict = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = args.k __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : List[str] = set(hypo.split("""\t""" )[:k] ) __UpperCAmelCase : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __UpperCAmelCase : List[str] = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): """simple docstring""" def strip_title(lowerCAmelCase__ : Optional[int] ): if title.startswith("""\"""" ): __UpperCAmelCase : List[Any] = title[1:] if title.endswith("""\"""" ): __UpperCAmelCase : int = title[:-1] return title __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )["""input_ids"""].to(args.device ) __UpperCAmelCase : str = 
rag_model.rag.question_encoder(lowerCAmelCase__ ) __UpperCAmelCase : int = question_enc_outputs[0] __UpperCAmelCase : Dict = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __UpperCAmelCase : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __UpperCAmelCase : Union[str, Any] = [] for docs in all_docs: __UpperCAmelCase : int = [strip_title(lowerCAmelCase__ ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(lowerCAmelCase__ ) ) return provenance_strings def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ): """simple docstring""" with torch.no_grad(): __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = inputs_dict.input_ids.to(args.device ) __UpperCAmelCase : List[Any] = inputs_dict.attention_mask.to(args.device ) __UpperCAmelCase : List[str] = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __UpperCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info("""Q: {} - A: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold 
samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if args.model_type is None: __UpperCAmelCase : str = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __UpperCAmelCase : Dict = args.n_docs if args.index_name is not None: __UpperCAmelCase : Union[str, Any] = args.index_name if args.index_path is not None: __UpperCAmelCase : Dict = args.index_path else: __UpperCAmelCase : str = BartForConditionalGeneration __UpperCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Optional[int] = 
RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __UpperCAmelCase : Union[str, Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" ) preds_file.flush() __UpperCAmelCase : List[str] = [] if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _UpperCamelCase = get_args() main(args)
```

`style_context_codestyle` (row 2): 16
`label` (row 2): 1

`code` (row 3):
```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_UpperCamelCase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ['''PLBartTokenizer''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PLBartForCausalLM''',
        '''PLBartForConditionalGeneration''',
        '''PLBartForSequenceClassification''',
        '''PLBartModel''',
        '''PLBartPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
```
`code_codestyle` (row 3): 16

`style_context` (row 3):

```python
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
```

`style_context_codestyle` (row 3): 16
`label` (row 3): 1

`code` (row 4):
```python
'''simple docstring'''
from datetime import datetime

import requests


def lowercase_ ( lowerCAmelCase__ : str ):
    """simple docstring"""
    __UpperCAmelCase : List[str] = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    __UpperCAmelCase : Tuple = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(lowerCAmelCase__ ).content


if __name__ == "__main__":
    _UpperCamelCase = input('''Enter Video/IGTV url: ''').strip()
    _UpperCamelCase = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'

    with open(file_name, '''wb''') as fp:
        fp.write(download_video(url))

    print(F'Done. Video saved to disk as {file_name}.')
```
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _UpperCamelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _UpperCamelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = do_lower_case __UpperCAmelCase : str = strip_accents __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase ) __UpperCAmelCase : List[Any] = do_lower_case def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase : 
Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
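# A hedged usage sketch for the fast ConvBERT tokenizer defined above, loaded through
# AutoTokenizer so only the checkpoint name matters. "YituTech/conv-bert-base" is one of
# the checkpoints in the pretrained map; hub access is assumed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
encoded = tokenizer("ConvBERT mixes convolutions into self-attention.")
# Special tokens are placed as build_inputs_with_special_tokens describes:
# [CLS] tokens [SEP], with token_type_ids of 0 for a single sequence.
print(encoded["input_ids"][:2], encoded["token_type_ids"][:2])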
'''simple docstring''' import re import string import numpy as np import datasets _UpperCamelCase = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' _UpperCamelCase = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' _UpperCamelCase = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def __A ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , 
__UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ) -> Any: '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __UpperCAmelCase : List[Any] = np.array([re.sub(__UpperCAmelCase , """""" , __UpperCAmelCase ) for x in predictions] ) __UpperCAmelCase : int = np.array([re.sub(__UpperCAmelCase , """""" , __UpperCAmelCase ) for x in references] ) else: __UpperCAmelCase : str = np.asarray(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = np.asarray(__UpperCAmelCase ) if ignore_case: __UpperCAmelCase : Tuple = np.char.lower(__UpperCAmelCase ) __UpperCAmelCase : str = np.char.lower(__UpperCAmelCase ) if ignore_punctuation: __UpperCAmelCase : Union[str, Any] = string.punctuation.maketrans("""""" , """""" , string.punctuation ) __UpperCAmelCase : Optional[Any] = np.char.translate(__UpperCAmelCase , table=__UpperCAmelCase ) __UpperCAmelCase : str = np.char.translate(__UpperCAmelCase , table=__UpperCAmelCase ) if ignore_numbers: __UpperCAmelCase : Union[str, Any] = string.digits.maketrans("""""" , """""" , string.digits ) __UpperCAmelCase : Dict = np.char.translate(__UpperCAmelCase , table=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = np.char.translate(__UpperCAmelCase , table=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = predictions == references return {"exact_match": np.mean(__UpperCAmelCase ) * 100}
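# A self-contained walk-through of the normalization steps the compute logic above applies,
# using the same numpy/string primitives, so the 50.0 case from the docstring can be
# reproduced without loading the metric. This mirrors the logic; it is not the metric itself.
import re
import string

import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])
for pattern in ["the ", "yell"]:  # regexes_to_ignore, stripped before the other options
    preds = np.array([re.sub(pattern, "", x) for x in preds])
    refs = np.array([re.sub(pattern, "", x) for x in refs])
preds, refs = np.char.lower(preds), np.char.lower(refs)  # ignore_case=True
table = string.punctuation.maketrans("", "", string.punctuation)  # ignore_punctuation=True
preds = np.char.translate(preds, table=table)
refs = np.char.translate(refs, table=table)
print(np.mean(preds == refs) * 100)  # 50.0, matching the second docstring example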
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''OwlViTFeatureExtractor'''] _UpperCamelCase = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
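# The module above registers its public symbols in _import_structure and hands them to
# _LazyModule, so importing the package stays cheap until a symbol such as
# OwlViTForObjectDetection is actually touched. Below is a minimal standalone sketch of
# that pattern, not the transformers implementation (whose internals differ):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name back to the submodule that defines it.
        self._name_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. on the first access to a symbol.
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so later lookups bypass __getattr__
        return value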
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase : List[str] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg""" __UpperCAmelCase : str = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" ) __UpperCAmelCase : Any = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ), ] ) __UpperCAmelCase : int = transform(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) return image def lowercase_ ( lowerCAmelCase__ : Any ): """simple docstring""" if "visual_encoder" in key: __UpperCAmelCase : Union[str, Any] = re.sub("""visual_encoder*""" , """vision_model.encoder""" , lowerCAmelCase__ ) if "blocks" in key: __UpperCAmelCase : str = re.sub(r"""blocks""" , """layers""" , lowerCAmelCase__ ) if "attn" in key: __UpperCAmelCase : Tuple = re.sub(r"""attn""" , """self_attn""" , lowerCAmelCase__ ) if "norm1" in key: __UpperCAmelCase : Dict = re.sub(r"""norm1""" , """layer_norm1""" , lowerCAmelCase__ ) if "norm2" in key: __UpperCAmelCase : Optional[Any] = re.sub(r"""norm2""" , """layer_norm2""" , lowerCAmelCase__ ) if "encoder.norm" in key: __UpperCAmelCase : str = re.sub(r"""encoder.norm""" , """post_layernorm""" , lowerCAmelCase__ ) if "encoder.patch_embed.proj" in key: __UpperCAmelCase : Optional[Any] = re.sub(r"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , lowerCAmelCase__ ) if "encoder.pos_embed" in key: __UpperCAmelCase : Optional[int] = re.sub(r"""encoder.pos_embed""" , """embeddings.position_embedding""" , lowerCAmelCase__ ) if "encoder.cls_token" in key: __UpperCAmelCase : str = re.sub(r"""encoder.cls_token""" , """embeddings.class_embedding""" , lowerCAmelCase__ ) if "self_attn" in key: __UpperCAmelCase : List[str] = re.sub(r"""self_attn.proj""" , """self_attn.projection""" , lowerCAmelCase__ ) return key @torch.no_grad() def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : int=None ): """simple docstring""" if config_path is not None: __UpperCAmelCase : List[Any] = BlipConfig.from_pretrained(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) __UpperCAmelCase : Optional[int] = BlipForConditionalGeneration(lowerCAmelCase__ ).eval() __UpperCAmelCase : str = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth""" __UpperCAmelCase : Optional[int] = blip_decoder(pretrained=lowerCAmelCase__ , image_size=384 , vit="""base""" ) __UpperCAmelCase : List[str] = pt_model.eval() __UpperCAmelCase : List[str] = pt_model.state_dict() for key in modified_state_dict.copy(): __UpperCAmelCase : Tuple = modified_state_dict.pop(lowerCAmelCase__ ) __UpperCAmelCase : int = rename_key(lowerCAmelCase__ ) __UpperCAmelCase : List[Any] = value 
hf_model.load_state_dict(lowerCAmelCase__ ) __UpperCAmelCase : Tuple = 384 __UpperCAmelCase : Union[str, Any] = load_demo_image(image_size=lowerCAmelCase__ , device="""cpu""" ) __UpperCAmelCase : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __UpperCAmelCase : Optional[Any] = tokenizer(["""a picture of"""] ).input_ids __UpperCAmelCase : str = hf_model.generate(lowerCAmelCase__ , lowerCAmelCase__ ) assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] __UpperCAmelCase : int = hf_model.generate(lowerCAmelCase__ ) assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowerCAmelCase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __UpperCAmelCase : Union[str, Any] = ( """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth""" ) __UpperCAmelCase : Optional[Any] = blip_vqa(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit="""base""" ) vqa_model.eval() __UpperCAmelCase : List[Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): __UpperCAmelCase : List[Any] = modified_state_dict.pop(lowerCAmelCase__ ) __UpperCAmelCase : str = rename_key(lowerCAmelCase__ ) __UpperCAmelCase : Union[str, Any] = value __UpperCAmelCase : List[str] = BlipForQuestionAnswering(lowerCAmelCase__ ) hf_vqa_model.load_state_dict(lowerCAmelCase__ ) __UpperCAmelCase : List[Any] = ["""How many dogs are in this image?"""] __UpperCAmelCase : Optional[Any] = tokenizer(lowerCAmelCase__ , return_tensors="""pt""" ).input_ids __UpperCAmelCase : str = hf_vqa_model.generate(lowerCAmelCase__ , lowerCAmelCase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" ) __UpperCAmelCase : Union[str, Any] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth""" __UpperCAmelCase : List[str] = blip_itm(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit="""base""" ) itm_model.eval() __UpperCAmelCase : Dict = itm_model.state_dict() for key in modified_state_dict.copy(): __UpperCAmelCase : int = modified_state_dict.pop(lowerCAmelCase__ ) __UpperCAmelCase : Tuple = rename_key(lowerCAmelCase__ ) __UpperCAmelCase : List[Any] = value __UpperCAmelCase : Dict = BlipForImageTextRetrieval(lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = ["""A picture of a woman with a dog sitting in a beach"""] __UpperCAmelCase : List[str] = tokenizer( lowerCAmelCase__ , return_tensors="""pt""" , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(lowerCAmelCase__ ) hf_itm_model.eval() __UpperCAmelCase : List[Any] = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ ) __UpperCAmelCase : Any = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ ) assert out[0].item() == 0.2_110_687_494_277_954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', 
default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') _UpperCamelCase = parser.parse_args() convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
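# Example invocation of the converter above (paths are placeholders, not real files; the
# script filename is an assumption based on upstream naming conventions):
#
#   python convert_blip_original_pytorch_to_transformers.py \
#       --pytorch_dump_folder_path ./blip-converted \
#       --config_path ./blip_config.json
#
# Only the two flags registered with argparse above are accepted; omitting both still runs
# the conversion and its assertions but skips every save_pretrained() call.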
"""Deprecation shim: LayoutLMv2FeatureExtractor now aliases LayoutLMv2ImageProcessor."""
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5"
            " of Transformers. Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' from __future__ import annotations from typing import Any class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 ) -> None: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = row, column __UpperCAmelCase : Union[str, Any] = [[default_value for c in range(__UpperCAmelCase )] for r in range(__UpperCAmelCase )] def __str__( self ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier __UpperCAmelCase : Optional[Any] = 0 for row_vector in self.array: for obj in row_vector: __UpperCAmelCase : Union[str, Any] = max(__UpperCAmelCase , len(str(__UpperCAmelCase ) ) ) __UpperCAmelCase : Optional[int] = f'%{max_element_length}s' # Make string and return def single_line(__UpperCAmelCase ) -> str: nonlocal string_format_identifier __UpperCAmelCase : Any = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__UpperCAmelCase ) for row_vector in self.array ) return s def __repr__( self ) -> str: '''simple docstring''' return str(self ) def __A ( self , __UpperCAmelCase ) -> bool: '''simple docstring''' if not (isinstance(__UpperCAmelCase , (list, tuple) ) and len(__UpperCAmelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , __UpperCAmelCase ) -> Any: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = value def __add__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == another.row and self.column == another.column # Add __UpperCAmelCase : Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : Dict = -self[r, c] return result def __sub__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' return self + (-another) def __mul__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' if isinstance(__UpperCAmelCase , (int, float) ): # Scalar multiplication __UpperCAmelCase : Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] * another return result elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # Matrix multiplication assert self.column == another.row __UpperCAmelCase : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __UpperCAmelCase : List[Any] = f'Unsupported type given for another ({type(__UpperCAmelCase )})' raise TypeError(__UpperCAmelCase ) def __A ( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Dict = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[str] = self[r, c] return result def __A ( self , 
__UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __UpperCAmelCase : Optional[Any] = v.transpose() __UpperCAmelCase : List[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Dict = Matrix(3 , 3 , 0 ) for i in range(3 ): ainv[i, i] = 1 print(f'a^(-1) is {ainv}' ) # u, v __UpperCAmelCase : Dict = Matrix(3 , 1 , 0 ) u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3 __UpperCAmelCase : Union[str, Any] = Matrix(3 , 1 , 0 ) v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' ) def lowercase_ ( ): """simple docstring""" import doctest doctest.testmod() testa()
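# A numeric cross-check of the Sherman-Morrison update implemented above, done with numpy
# (an extra assumption here; the module itself is dependency-free):
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
import numpy as np

a_inv = np.eye(3)                      # inverse of the identity, as in the test above
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

factor = 1.0 + (v.T @ a_inv @ u)[0, 0]
updated = a_inv - (a_inv @ u @ v.T @ a_inv) / factor
direct = np.linalg.inv(np.eye(3) + u @ v.T)  # A = I, so A + u v^T is just I + u v^T
assert np.allclose(updated, direct)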
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _SCREAMING_SNAKE_CASE : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) __UpperCAmelCase : List[Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : Dict = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) __UpperCAmelCase : str = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' import torch __UpperCAmelCase : Any = pipeline( task="""text-classification""" , 
model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pipeline("""text-classification""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : Any = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : int = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase : Union[str, Any] = """HuggingFace is in""" __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) __UpperCAmelCase : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""] __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase ) 
__UpperCAmelCase : Any = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] , ) __UpperCAmelCase : str = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} __UpperCAmelCase : Optional[int] = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __UpperCAmelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__UpperCAmelCase ): text_classifier(__UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
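# A condensed sketch of the behavior these tests pin down, using the same tiny checkpoint.
# top_k=None returns every label's score; it is the non-legacy replacement for the
# return_all_scores=True flag exercised above.
from transformers import pipeline

classifier = pipeline(
    "text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
print(classifier("This is great !"))              # top-1 only: [{'label': ..., 'score': ...}]
print(classifier("This is great !", top_k=None))  # one dict per label, highest score first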
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : int = "focalnet" def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1E-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = image_size __UpperCAmelCase : Tuple = patch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : List[str] = embed_dim __UpperCAmelCase : int = use_conv_embed __UpperCAmelCase : Tuple = hidden_sizes __UpperCAmelCase : int = depths __UpperCAmelCase : List[str] = focal_levels __UpperCAmelCase : Dict = focal_windows __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : Union[str, Any] = mlp_ratio __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : Tuple = use_layerscale __UpperCAmelCase : str = layerscale_value __UpperCAmelCase : str = use_post_layernorm __UpperCAmelCase : int = use_post_layernorm_in_modulation __UpperCAmelCase : List[Any] = normalize_modulator __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : str = layer_norm_eps __UpperCAmelCase : int = encoder_stride __UpperCAmelCase : int = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
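# A brief sketch instantiating the configuration above with its defaults. FocalNetConfig
# is assumed to be the exported name (matching model_type = "focalnet"); the derived
# fields below are computed in __init__ from depths and the out_features/out_indices
# alignment helper.
from transformers import FocalNetConfig

config = FocalNetConfig()
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] for depths [2, 2, 6, 2]
print(config.depths)       # [2, 2, 6, 2]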
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( 
metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = 
["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''vocab_file''': '''vocab.json''', '''tokenizer_config_file''': '''tokenizer_config.json''', '''merges_file''': '''merges.txt''', } _UpperCamelCase = { '''vocab_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json''' ), }, '''tokenizer_config_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json''' ), }, '''merges_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt''' ), }, } _UpperCamelCase = '''</w>''' _UpperCamelCase = '''@@ ''' def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : str = set() __UpperCAmelCase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char return pairs # Speech2Text2 has no max input length _UpperCamelCase = {'''facebook/s2t-wav2vec2-large-en-de''': 1024} class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[str]: '''simple docstring''' super().__init__( unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : int = do_lower_case with open(__UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle: __UpperCAmelCase : Optional[int] = json.load(__UpperCAmelCase ) __UpperCAmelCase : Tuple = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' 
) __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[int] = None else: with open(__UpperCAmelCase , encoding="""utf-8""" ) as merges_handle: __UpperCAmelCase : Optional[Any] = merges_handle.read().split("""\n""" )[:-1] __UpperCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges] __UpperCAmelCase : str = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : Any = {} @property def __A ( self ) -> int: '''simple docstring''' return len(self.decoder ) def __A ( self ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] __UpperCAmelCase : Optional[int] = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[Any] = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Dict = bigram __UpperCAmelCase : str = [] __UpperCAmelCase : Union[str, Any] = 0 while i < len(__UpperCAmelCase ): try: __UpperCAmelCase : Dict = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Optional[int] = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__UpperCAmelCase ) __UpperCAmelCase : Any = new_word if len(__UpperCAmelCase ) == 1: break else: __UpperCAmelCase : Union[str, Any] = get_pairs(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = """ """.join(__UpperCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: __UpperCAmelCase : Tuple = """\n""" + BPE_TOKEN_MERGES if word.endswith(__UpperCAmelCase ): __UpperCAmelCase : List[str] = word.replace(__UpperCAmelCase , """""" ) __UpperCAmelCase : Optional[int] = word.replace(""" """ , __UpperCAmelCase ) __UpperCAmelCase : Tuple = word return word def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if self.bpe_ranks is None: raise ValueError( """This tokenizer was instantiated without a `merges.txt` file, so""" """ that it can only be used for decoding, not for encoding.""" """Make sure to provide `merges.txt` file at instantiation to enable """ """encoding.""" ) if self.do_lower_case: __UpperCAmelCase : Optional[Any] = text.lower() __UpperCAmelCase : int = text.split() __UpperCAmelCase : Any = [] for token in text: if token: split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) ) return split_tokens def __A ( self , __UpperCAmelCase ) -> int: '''simple docstring''' return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.decoder.get(__UpperCAmelCase , self.unk_token ) return result def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = """ """.join(__UpperCAmelCase ) # make sure @@ tokens are concatenated __UpperCAmelCase : Optional[Any] = """""".join(string.split(__UpperCAmelCase ) ) return string def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary 
path ({save_directory}) should be a directory' ) return __UpperCAmelCase : Dict = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : Optional[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) __UpperCAmelCase : str = token_index writer.write(""" """.join(__UpperCAmelCase ) + """\n""" ) index += 1 return (vocab_file, merges_file)
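# get_pairs() above is a pure helper, so its behavior is easy to show in isolation. The
# tuple below mimics how bpe() prepares a token: every character is a symbol, and the last
# one carries the "</w>" end-of-word marker before merge ranks are looked up.
word = ("l", "o", "w", "e", "r</w>")
pairs = set()
prev_char = word[0]
for char in word[1:]:  # same loop as get_pairs()
    pairs.add((prev_char, char))
    prev_char = char
print(sorted(pairs))
# [('e', 'r</w>'), ('l', 'o'), ('o', 'w'), ('w', 'e')]: the candidate merges bpe() ranks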
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _A : def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase ) __UpperCAmelCase : List[str] = length __UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Dict: '''simple docstring''' return self.length def __getitem__( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int: '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Any = True def __A ( self , __UpperCAmelCase=None ) -> str: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : Optional[int] = False return x * self.a[0] + self.b[0] class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : str = True def __A ( self , __UpperCAmelCase=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : int = False return x * self.a + self.b def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} __UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" ) __UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : List[Any] = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" ) if "label" in examples: __UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) __UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
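# A short sketch wiring the synthetic dataset above into a plain DataLoader. The name
# RegressionDataset is an assumption (the upstream accelerate name for this class), and
# arguments are passed positionally as (a, b, length, seed) since the parameter names are
# not pinned down above.
from torch.utils.data import DataLoader

dataset = RegressionDataset(2, 3, 64, 42)   # y is roughly 2 * x + 3 plus gaussian noise
loader = DataLoader(dataset, batch_size=8, shuffle=True)
batch = next(iter(loader))
print(batch["x"].shape, batch["y"].shape)   # torch.Size([8]) torch.Size([8])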
16
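From user code, the behaviours exercised by the tests above look as follows; a sketch assuming `transformers` with the PyTorch backend (the tiny checkpoint name is taken from the tests themselves):

# top_k controls how many labels come back; top_k=None returns all of them and is
# the modern replacement for the legacy return_all_scores=True flag tested above.
from transformers import pipeline

classifier = pipeline(
    "text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)

print(classifier("This is great !"))              # best label only: [{'label': ..., 'score': ...}]
print(classifier("This is great !", top_k=2))     # two best labels, sorted by descending score
print(classifier("This is great !", top_k=None))  # one entry per label in the model config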
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = (3, 32, 128) __UpperCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : List[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = 
self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.prepare_image_inputs() __UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Dict = """test""" __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = """test""" __UpperCAmelCase : int = self.prepare_image_inputs() __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase ) __UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() 
__UpperCAmelCase : Optional[Any] = self.get_tokenizer() __UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 ) __UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 ) __UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 ) __UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
16
1
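The setUp above builds a throwaway character vocabulary on disk and reloads it through from_pretrained; the same fixture pattern in isolation, assuming only the standard library and `transformers`:

# Write a minimal MGP-STR character vocab to a temp dir and load a tokenizer from it.
import json
import os
import tempfile

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES

vocab = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"]), "w", encoding="utf-8") as fp:
    json.dump({token: idx for idx, token in enumerate(vocab)}, fp)

tokenizer = MgpstrTokenizer.from_pretrained(tmpdir)
print(tokenizer("test")["input_ids"])  # one id per character, from the vocab above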
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self ) -> Dict: '''simple docstring''' # test for the above condition self.test() def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : Optional[int] = False while not completed: if counter == 1: self.reset() __UpperCAmelCase : str = self.advance() if not self.does_advance(__UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = self.update(__UpperCAmelCase ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def __A ( self ) -> Dict: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def __A ( self , __UpperCAmelCase ) -> Dict: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def __A ( self ) -> str: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def __A ( self ) -> List[Any]: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def __A ( self , __UpperCAmelCase=False ) -> int: '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' super(__UpperCAmelCase , self ).__init__() if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or len(__UpperCAmelCase ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' 
) __UpperCAmelCase : Tuple = token_ids __UpperCAmelCase : Dict = len(self.token_ids ) __UpperCAmelCase : Dict = -1 # the index of the currently fulfilled step __UpperCAmelCase : Optional[int] = False def __A ( self ) -> str: '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__UpperCAmelCase )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __A ( self , __UpperCAmelCase ) -> Any: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__UpperCAmelCase )}' ) __UpperCAmelCase : str = False __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[Any] = False if self.does_advance(__UpperCAmelCase ): self.fulfilled_idx += 1 __UpperCAmelCase : Tuple = True if self.fulfilled_idx == (self.seqlen - 1): __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Tuple = completed else: # failed to make progress. __UpperCAmelCase : Union[str, Any] = True self.reset() return stepped, completed, reset def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : int = 0 def __A ( self ) -> Union[str, Any]: '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def __A ( self , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = PhrasalConstraint(self.token_ids ) if stateful: __UpperCAmelCase : int = self.seqlen __UpperCAmelCase : List[Any] = self.fulfilled_idx __UpperCAmelCase : Dict = self.completed return new_constraint class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : str = max([len(__UpperCAmelCase ) for one in nested_token_ids] ) __UpperCAmelCase : Optional[int] = {} for token_ids in nested_token_ids: __UpperCAmelCase : str = root for tidx, token_id in enumerate(__UpperCAmelCase ): if token_id not in level: __UpperCAmelCase : int = {} __UpperCAmelCase : List[Any] = level[token_id] if no_subsets and self.has_subsets(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f' {nested_token_ids}.' 
) __UpperCAmelCase : Optional[int] = root def __A ( self , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = self.trie for current_token in current_seq: __UpperCAmelCase : int = start[current_token] __UpperCAmelCase : Dict = list(start.keys() ) return next_tokens def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.next_tokens(__UpperCAmelCase ) return len(__UpperCAmelCase ) == 0 def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = list(root.values() ) if len(__UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(__UpperCAmelCase ) for nn in next_nodes] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.count_leaves(__UpperCAmelCase ) return len(__UpperCAmelCase ) != leaf_count class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' super(__UpperCAmelCase , self ).__init__() if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or len(__UpperCAmelCase ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(__UpperCAmelCase , __UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' ) __UpperCAmelCase : int = DisjunctiveTrie(__UpperCAmelCase ) __UpperCAmelCase : Tuple = nested_token_ids __UpperCAmelCase : Tuple = self.trie.max_height __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Optional[Any] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = self.trie.next_tokens(self.current_seq ) if len(__UpperCAmelCase ) == 0: return None else: return token_list def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__UpperCAmelCase )}' ) __UpperCAmelCase : int = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__UpperCAmelCase )}' ) __UpperCAmelCase : Dict = False __UpperCAmelCase : int = False __UpperCAmelCase : List[Any] = False if self.does_advance(__UpperCAmelCase ): self.current_seq.append(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = True else: __UpperCAmelCase : List[str] = True self.reset() __UpperCAmelCase : List[str] = self.trie.reached_leaf(self.current_seq ) __UpperCAmelCase : int = completed return stepped, completed, reset def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[str] = False __UpperCAmelCase : Dict = [] def __A ( self ) -> Tuple: '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __A ( self , __UpperCAmelCase=False ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = 
DisjunctiveConstraint(self.token_ids ) if stateful: __UpperCAmelCase : Any = self.seqlen __UpperCAmelCase : Optional[Any] = self.current_seq __UpperCAmelCase : Tuple = self.completed return new_constraint class _A : def __init__( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = constraints # max # of steps required to fulfill a given constraint __UpperCAmelCase : int = max([c.seqlen for c in constraints] ) __UpperCAmelCase : str = len(__UpperCAmelCase ) __UpperCAmelCase : int = False self.init_state() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : int = None __UpperCAmelCase : Any = [constraint.copy(stateful=__UpperCAmelCase ) for constraint in self.constraints] def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __UpperCAmelCase : Union[str, Any] = constraint.advance() if isinstance(__UpperCAmelCase , __UpperCAmelCase ): token_list.append(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): token_list.extend(__UpperCAmelCase ) else: __UpperCAmelCase : List[Any] = self.inprogress_constraint.advance() if isinstance(__UpperCAmelCase , __UpperCAmelCase ): token_list.append(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): token_list.extend(__UpperCAmelCase ) if len(__UpperCAmelCase ) == 0: return None else: return token_list def __A ( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.add(__UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) __UpperCAmelCase , __UpperCAmelCase : Dict = False, False if self.completed: __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Union[str, Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = self.inprogress_constraint.update(__UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__UpperCAmelCase ) ) __UpperCAmelCase : int = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) __UpperCAmelCase : Dict = None if len(self.pending_constraints ) == 0: # we're done! __UpperCAmelCase : Tuple = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__UpperCAmelCase ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = pending_constraint.update(__UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(__UpperCAmelCase ) __UpperCAmelCase : Tuple = None if not complete and stepped: __UpperCAmelCase : Dict = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __UpperCAmelCase : Tuple = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __UpperCAmelCase : Union[str, Any] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __A ( self , __UpperCAmelCase=True ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __UpperCAmelCase : Dict = [ constraint.copy(stateful=__UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __UpperCAmelCase : Dict = self.inprogress_constraint.copy(stateful=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [constraint.copy() for constraint in self.pending_constraints] return new_state
16
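The test() harness at the top of the abstract class drives exactly this advance/update loop. Here is the same loop from user code, assuming the concrete class is the PhrasalConstraint that `transformers` exports at top level (the class names above are obfuscated):

# advance() proposes the next token that makes progress; update() consumes one token
# and reports (stepped, completed, reset), matching the state machine defined above.
from transformers import PhrasalConstraint

constraint = PhrasalConstraint([5, 9, 3])  # token ids of the phrase to force

while not constraint.completed:
    token = constraint.advance()                 # 5, then 9, then 3
    stepped, completed, reset = constraint.update(token)
    print(token, stepped, completed, reset)      # always steps; completes on the last token
print(constraint.remaining())                    # 0 once the whole phrase is fulfilled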
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum of a (not necessarily contiguous) subsequence."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the best so far, extend it with num, or start over from num alone.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
16
1
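A quick worked check of the recurrence ans = max(ans, ans + num, num): because the subsequence need not be contiguous, every positive number is worth taking, and with all-negative input the best single element wins.

# 1 + 4 + 2 + 1 + 4 = 12: all the positive entries, skipping the negatives in between
print(max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 12
# all negative: the least negative single element is the best subsequence
print(max_subsequence_sum([-3, -1, -2]))                     # -1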
'''simple docstring''' from ...configuration_utils import PretrainedConfig _UpperCamelCase = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = "tapas" def __init__( self , __UpperCAmelCase=30_522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_024 , __UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase=10.0 , __UpperCAmelCase=0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase="ratio" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __UpperCAmelCase : Optional[int] = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Any = intermediate_size __UpperCAmelCase : Any = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Any = max_position_embeddings __UpperCAmelCase : str = type_vocab_sizes __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = layer_norm_eps # Fine-tuning task hyperparameters __UpperCAmelCase : Tuple = positive_label_weight __UpperCAmelCase : Any = num_aggregation_labels __UpperCAmelCase : int = aggregation_loss_weight __UpperCAmelCase : Optional[Any] = use_answer_as_supervision __UpperCAmelCase : str = answer_loss_importance __UpperCAmelCase : Any = use_normalized_answer_loss __UpperCAmelCase : Union[str, Any] = huber_loss_delta __UpperCAmelCase : Union[str, Any] = temperature __UpperCAmelCase : Optional[Any] = aggregation_temperature __UpperCAmelCase : Dict = use_gumbel_for_cells __UpperCAmelCase : List[str] = use_gumbel_for_aggregation __UpperCAmelCase : Optional[Any] = average_approximation_function __UpperCAmelCase : Optional[Any] = cell_selection_preference __UpperCAmelCase : List[Any] = answer_loss_cutoff __UpperCAmelCase : List[Any] = max_num_rows __UpperCAmelCase : List[Any] = max_num_columns __UpperCAmelCase : List[Any] = average_logits_per_cell __UpperCAmelCase : List[Any] = select_one_column __UpperCAmelCase : str = allow_empty_column_selection __UpperCAmelCase : Dict = 
init_cell_selection_weights_to_zero __UpperCAmelCase : Tuple = reset_position_index_per_cell __UpperCAmelCase : int = disable_per_token_loss # Aggregation hyperparameters __UpperCAmelCase : Any = aggregation_labels __UpperCAmelCase : Dict = no_aggregation_label_index if isinstance(self.aggregation_labels , dict ): __UpperCAmelCase : List[str] = {int(k): v for k, v in aggregation_labels.items()}
16
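The last statement of __init__ normalises aggregation label keys from strings (how they arrive when a config is deserialised from JSON) to ints. A hedged sketch with the released TapasConfig; the label names here mirror the WTQ fine-tuning setup:

# String keys in, int keys out - the dict comprehension at the end of __init__ above.
from transformers import TapasConfig

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
)
print(config.aggregation_labels)  # {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}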
'''simple docstring''' class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = data __UpperCAmelCase : int = previous __UpperCAmelCase : Union[str, Any] = next_node def __str__( self ) -> str: '''simple docstring''' return f'{self.data}' def __A ( self ) -> int: '''simple docstring''' return self.data def __A ( self ) -> List[str]: '''simple docstring''' return self.next def __A ( self ) -> str: '''simple docstring''' return self.previous class _A : def __init__( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = head def __iter__( self ) -> str: '''simple docstring''' return self def __A ( self ) -> str: '''simple docstring''' if not self.current: raise StopIteration else: __UpperCAmelCase : List[str] = self.current.get_data() __UpperCAmelCase : int = self.current.get_next() return value class _A : def __init__( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = None # First node in list __UpperCAmelCase : List[str] = None # Last node in list def __str__( self ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = self.head __UpperCAmelCase : Optional[int] = [] while current is not None: nodes.append(current.get_data() ) __UpperCAmelCase : Any = current.get_next() return " ".join(str(__UpperCAmelCase ) for node in nodes ) def __contains__( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.head while current: if current.get_data() == value: return True __UpperCAmelCase : Optional[Any] = current.get_next() return False def __iter__( self ) -> str: '''simple docstring''' return LinkedListIterator(self.head ) def __A ( self ) -> List[Any]: '''simple docstring''' if self.head: return self.head.get_data() return None def __A ( self ) -> Optional[Any]: '''simple docstring''' if self.tail: return self.tail.get_data() return None def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: __UpperCAmelCase : str = node __UpperCAmelCase : List[str] = node else: self.insert_before_node(self.head , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: self.set_head(__UpperCAmelCase ) else: self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = Node(__UpperCAmelCase ) if self.head is None: self.set_head(__UpperCAmelCase ) else: self.set_tail(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Tuple = node __UpperCAmelCase : List[Any] = node.previous if node.get_previous() is None: __UpperCAmelCase : str = node_to_insert else: __UpperCAmelCase : Optional[Any] = node_to_insert __UpperCAmelCase : List[Any] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : List[str] = node __UpperCAmelCase : Union[str, Any] = node.next if node.get_next() is None: __UpperCAmelCase : Dict = node_to_insert else: __UpperCAmelCase : Any = node_to_insert __UpperCAmelCase : List[str] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Optional[Any] = Node(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.head while node: if current_position == position: 
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase ) return current_position += 1 __UpperCAmelCase : int = node.next self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Node: '''simple docstring''' __UpperCAmelCase : Dict = self.head while node: if node.get_data() == item: return node __UpperCAmelCase : List[str] = node.get_next() raise Exception("""Node not found""" ) def __A ( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if (node := self.get_node(__UpperCAmelCase )) is not None: if node == self.head: __UpperCAmelCase : Optional[int] = self.head.get_next() if node == self.tail: __UpperCAmelCase : Union[str, Any] = self.tail.get_previous() self.remove_node_pointers(__UpperCAmelCase ) @staticmethod def __A ( __UpperCAmelCase ) -> None: '''simple docstring''' if node.get_next(): __UpperCAmelCase : Optional[Any] = node.previous if node.get_previous(): __UpperCAmelCase : int = node.next __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None def __A ( self ) -> List[Any]: '''simple docstring''' return self.head is None def lowercase_ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
16
1
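All three classes in the file above are obfuscated to _A; under the names of the upstream file this appears to come from (Node, LinkedListIterator, LinkedList in TheAlgorithms), a typical session would look like this. A sketch under that naming assumption, not runnable against the obfuscated file as-is:

# Assumed upstream names: LinkedList with insert / insert_at_position / delete_value.
linked_list = LinkedList()
for value in (1, 3, 4):
    linked_list.insert(value)            # appends at the tail
linked_list.insert_at_position(2, 2)     # list is now 1 <-> 2 <-> 3 <-> 4
print(linked_list)                       # "1 2 3 4" via __str__
print(3 in linked_list)                  # True via __contains__
linked_list.delete_value(3)              # unlinks the node, fixing head/tail if needed
print(list(linked_list))                 # [1, 2, 4] via the iterator class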
'''simple docstring''' import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node _UpperCamelCase = 4 _UpperCamelCase = 3 class _A ( __SCREAMING_SNAKE_CASE ): pass def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" for shard in shards: for i in range(lowerCAmelCase__ ): yield {"i": i, "shard": shard} def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = int(os.environ["""RANK"""] ) __UpperCAmelCase : Optional[Any] = int(os.environ["""WORLD_SIZE"""] ) __UpperCAmelCase : Tuple = ArgumentParser() parser.add_argument("""--streaming""" , type=lowerCAmelCase__ ) parser.add_argument("""--local_rank""" , type=lowerCAmelCase__ ) parser.add_argument("""--num_workers""" , type=lowerCAmelCase__ , default=0 ) __UpperCAmelCase : Dict = parser.parse_args() __UpperCAmelCase : Dict = args.streaming __UpperCAmelCase : Optional[int] = args.num_workers __UpperCAmelCase : str = {"""shards""": [f'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]} __UpperCAmelCase : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ ) if not streaming: __UpperCAmelCase : Tuple = Dataset.from_list(list(lowerCAmelCase__ ) ) __UpperCAmelCase : Any = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ ) __UpperCAmelCase : str = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = NUM_SHARDS * NUM_ITEMS_PER_SHARD __UpperCAmelCase : List[Any] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __UpperCAmelCase : List[str] = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' ) if __name__ == "__main__": main()
16
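Outside the torch.distributed harness, the sharding behaviour the script asserts can be observed directly; a minimal sketch assuming the `datasets` library:

# Each rank keeps a disjoint shard, and shard sizes differ by at most one example -
# exactly the expected_local_size formula checked in the script above.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

full = Dataset.from_dict({"i": list(range(10))})
for rank in range(3):
    shard = split_dataset_by_node(full, rank=rank, world_size=3)
    print(rank, len(shard), shard["i"])
# rank 0 gets 4 examples, ranks 1 and 2 get 3 each (10 // 3, plus one when rank < 10 % 3)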
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _A : _SCREAMING_SNAKE_CASE : List[str] _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="Translation" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Any: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class _A : _SCREAMING_SNAKE_CASE : Optional[List] = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="TranslationVariableLanguages" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = sorted(set(self.languages ) ) if self.languages else None __UpperCAmelCase : int = len(self.languages ) if self.languages else None def __call__( self ) -> Optional[Any]: '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def __A ( self , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = set(self.languages ) if self.languages and set(__UpperCAmelCase ) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(__UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(__UpperCAmelCase )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __UpperCAmelCase : Dict = [] for lang, text in translation_dict.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = zip(*sorted(__UpperCAmelCase ) ) return {"language": languages, "translation": translations} def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
16
1
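Both feature types above in use; a sketch assuming the `datasets` library. Translation fixes the language set per column, while TranslationVariableLanguages flattens a per-example dict into parallel language/translation lists, sorted by language code as in the encode method above:

from datasets import Dataset, Features
from datasets.features import Translation, TranslationVariableLanguages

features = Features({"translation": Translation(languages=["de", "en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"de": "die Katze", "en": "the cat", "fr": "le chat"}]}, features=features
)
print(ds[0]["translation"])  # {'de': 'die Katze', 'en': 'the cat', 'fr': 'le chat'}

tvl = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(tvl.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# language: ('en', 'fr', 'fr'); translation: ('the cat', 'la chatte', 'le chat')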
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn-around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
16
1
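A short hand-check of the __main__ run above: with arrival times [1, 2, 3, 4, 5] and burst times [1, 2, 3, 4, 5], the processes happen to run in order A through E.

# At t=7 process D is picked: its ratio is (3 waited + 4 burst) / 4 = 1.75, beating
# E's (2 + 5) / 5 = 1.4; running everything through the functions above gives:
tat = calculate_turn_around_time(["A", "B", "C", "D", "E"], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 5)
wt = calculate_waiting_time(["A", "B", "C", "D", "E"], tat, [1, 2, 3, 4, 5], 5)
assert tat == [1, 2, 4, 7, 11]
assert wt == [0, 0, 1, 3, 6]  # average waiting time 2.0, average turn-around time 5.0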
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = {} if "candidate_labels" in kwargs: __UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __UpperCAmelCase : Union[str, Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." ) -> int: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = load_image(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework ) __UpperCAmelCase : str = candidate_labels __UpperCAmelCase : Dict = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels] __UpperCAmelCase : Optional[int] = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase ) __UpperCAmelCase : int = [text_inputs] return inputs def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = model_inputs.pop("""candidate_labels""" ) __UpperCAmelCase : List[str] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __UpperCAmelCase ): __UpperCAmelCase : Tuple = text_inputs[0] else: # Batching case. 
__UpperCAmelCase : Union[str, Any] = text_inputs[0][0] __UpperCAmelCase : Optional[int] = self.model(**__UpperCAmelCase , **__UpperCAmelCase ) __UpperCAmelCase : int = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = model_outputs.pop("""candidate_labels""" ) __UpperCAmelCase : Optional[Any] = model_outputs["""logits"""][0] if self.framework == "pt": __UpperCAmelCase : Optional[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) __UpperCAmelCase : List[Any] = probs.tolist() if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : List[str] = [scores] elif self.framework == "tf": __UpperCAmelCase : str = stable_softmax(__UpperCAmelCase , axis=-1 ) __UpperCAmelCase : Any = probs.numpy().tolist() else: raise ValueError(f'Unsupported framework: {self.framework}' ) __UpperCAmelCase : List[str] = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__UpperCAmelCase , __UpperCAmelCase ) , key=lambda x : -x[0] ) ] return result
16
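From user code the pipeline above is driven like this; a sketch assuming `transformers` with torch, PIL, and network access to the public CLIP checkpoint:

# candidate_labels and hypothesis_template are the two preprocess parameters the
# pipeline above pulls out of kwargs; results come back sorted by descending score.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # two cats on a couch
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)
print(result)  # [{'score': ..., 'label': 'cat'}, ...]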
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( 
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , 
hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = 
model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _A : _SCREAMING_SNAKE_CASE : Optional[int] = MBartConfig _SCREAMING_SNAKE_CASE : List[Any] = {} _SCREAMING_SNAKE_CASE : int = "gelu" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Any = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Union[str, Any] = eos_token_id __UpperCAmelCase : Any = pad_token_id __UpperCAmelCase : Any = bos_token_id def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCAmelCase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : List[Any] = prepare_mbart_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = TFMBartModel(config=__UpperCAmelCase ).get_decoder() __UpperCAmelCase : Dict = inputs_dict["""input_ids"""] __UpperCAmelCase : List[str] = input_ids[:1, :] __UpperCAmelCase : Any = inputs_dict["""attention_mask"""][:1, :] __UpperCAmelCase : Tuple = inputs_dict["""head_mask"""] __UpperCAmelCase : Tuple = 1 # first forward pass 
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = outputs.to_tuple() __UpperCAmelCase : str = past_key_values[1] def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : str=None , ): """simple docstring""" if attention_mask is None: __UpperCAmelCase : Any = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __UpperCAmelCase : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCAmelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCAmelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () _SCREAMING_SNAKE_CASE : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () _SCREAMING_SNAKE_CASE : List[Any] = ( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = False def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = TFMBartModelTester(self ) __UpperCAmelCase : int = ConfigTester(self , config_class=__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = [ " UN Chief Says There Is No Military Solution in Syria", ] _SCREAMING_SNAKE_CASE : Dict = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] _SCREAMING_SNAKE_CASE : Any = "facebook/mbart-large-en-ro" @cached_property def __A ( self ) -> int: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __A ( self , **__UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.translate_src_text(**__UpperCAmelCase ) self.assertListEqual(self.expected_text , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = self.tokenizer(self.src_text , **__UpperCAmelCase , return_tensors="""tf""" ) __UpperCAmelCase : Any = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __UpperCAmelCase : Dict = self.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def __A ( self ) -> Tuple: '''simple docstring''' self._assert_generated_batch_equal_expected()
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Any = image_size __UpperCAmelCase : Dict = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : List[Any] = embed_dim __UpperCAmelCase : str = depths __UpperCAmelCase : Dict = num_heads __UpperCAmelCase : str = window_size __UpperCAmelCase : int = mlp_ratio __UpperCAmelCase : Union[str, Any] = qkv_bias __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Any = patch_norm __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = scope __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : Optional[int] = type_sequence_label_size __UpperCAmelCase : int = encoder_stride def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Tuple = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __A ( self ) -> Dict: '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) __UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : str = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = self.type_sequence_label_size __UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs __UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[str] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = SwinvaModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 ) def __A ( self ) -> Any: '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def __A ( self ) -> Dict: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(__UpperCAmelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : str = [*signature.parameters.keys()] __UpperCAmelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : str = outputs.attentions __UpperCAmelCase : Any = len(self.model_tester.depths ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : Dict = True __UpperCAmelCase : int = config.window_size**2 __UpperCAmelCase : Any = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase : Dict = len(__UpperCAmelCase ) # Check attention is always last and order is fine __UpperCAmelCase : Any = True __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase : Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase : Optional[int] = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) ) __UpperCAmelCase : Tuple = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase 
) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : List[Any] = outputs.hidden_states __UpperCAmelCase : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # Swinv2 has a different seq_length __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase : int = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = reshaped_hidden_states[0].shape __UpperCAmelCase : Any = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = 3 __UpperCAmelCase : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase : int = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase 
, __UpperCAmelCase , (padded_height, padded_width) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = SwinvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def __A ( self ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.default_image_processor __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : Any = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
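# Usage note (comments only): with the lazy structure above, importing the
# package is cheap; the heavy submodules are loaded on first attribute access,
# e.g. `from transformers import LayoutLMv2Config` resolves via _LazyModule.
# Names guarded by the optional-dependency checks (tokenizers / vision / torch)
# are only registered when the corresponding `is_*_available()` check passes.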
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase__ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : int = size if size is not None else {"""shortest_edge""": 256} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : List[str] = size __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : Any = crop_size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Dict = do_rescale __UpperCAmelCase : List[str] = rescale_factor __UpperCAmelCase : Dict = offset __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: __UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : Any = (size["""height"""], size["""width"""]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Any = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = image.astype(np.floataa ) if offset: __UpperCAmelCase : Tuple = image - (scale / 2) return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
__UpperCAmelCase : Optional[Any] = to_numpy_array(__UpperCAmelCase ) if do_resize: __UpperCAmelCase : Optional[int] = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: __UpperCAmelCase : Optional[int] = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: __UpperCAmelCase : int = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , offset=__UpperCAmelCase ) if do_normalize: __UpperCAmelCase : List[str] = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : List[Any] = resample if resample is not None else self.resample __UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[Any] = offset if offset is not None else self.offset __UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : int = image_std if image_std is not None else self.image_std __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : str = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __UpperCAmelCase : int = make_batched(__UpperCAmelCase ) __UpperCAmelCase : Tuple = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , offset=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] __UpperCAmelCase : Tuple = {"""pixel_values""": videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
"""Project Euler problem 6: the difference between the square of the sum and
the sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Compute the difference using the closed-form identities
    sum(1..n) = n(n + 1)/2 and sum(1^2..n^2) = n(n + 1)(2n + 1)/6."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
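# A minimal cross-check sketch (not part of the original solution), assuming
# only the standard library: it validates the closed-form result against a
# brute-force sum for a small n. The helper name `solution_brute_force` is
# illustrative, not an existing API.
def solution_brute_force(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2  # (1 + 2 + ... + n)^2
    sum_of_squares = sum(i * i for i in range(1, n + 1))  # 1^2 + 2^2 + ... + n^2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # For n = 10 both methods give 3025 - 385 = 2640.
    assert solution(10) == solution_brute_force(10) == 2640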
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Dict = LDMTextToImagePipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } _SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } _SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __UpperCAmelCase : Any = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __UpperCAmelCase : Tuple = CLIPTextModel(__UpperCAmelCase ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __UpperCAmelCase : Dict = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any: '''simple docstring''' if str(__UpperCAmelCase ).startswith("""mps""" ): __UpperCAmelCase : int = torch.manual_seed(__UpperCAmelCase ) else: __UpperCAmelCase : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Dict = self.get_dummy_components() __UpperCAmelCase : Tuple = LDMTextToImagePipeline(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : 
Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __UpperCAmelCase : Dict = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : int = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : int = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __UpperCAmelCase : Tuple = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) __UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = pipe(**__UpperCAmelCase ).images[0] __UpperCAmelCase : Tuple = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) __UpperCAmelCase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
"""Maximum sum of a subsequence (elements need not be contiguous)."""

from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum over all non-empty subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the best sum so far, extend it with the current element, or
        # restart from the current element alone.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
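# Worked example (comments only): for nums = [3, -5, 1] the recurrence evolves as
#   ans = 3                          (initialise with nums[0])
#   ans = max(3, 3 + (-5), -5) = 3   (skipping -5 keeps the best sum)
#   ans = max(3, 3 + 1, 1)     = 4   (best subsequence is 3 and 1, sum 4)
# Because a subsequence need not be contiguous, dropping negative elements is
# always allowed, which is exactly what carrying the previous `ans` expresses.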
"""Sherman-Morrison rank-one update of an inverted matrix."""

from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")

        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")

        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
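# Reference (comments only): the `sherman_morrison` method above implements the
# Sherman-Morrison identity, which updates a known inverse A^(-1) after a
# rank-one change:
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# Here `self` plays the role of A^(-1), so the update needs only matrix-vector
# products instead of a full re-inversion; it fails (returns None) exactly when
# 1 + v^T A^(-1) u == 0, i.e. when A + u v^T is singular.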
"""Euclidean algorithm for the greatest common divisor (GCD)."""


def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
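# A minimal sanity-check sketch (not part of the original file), assuming only
# the standard library: both implementations should agree with math.gcd for
# non-negative inputs.
if __name__ == "__main__":
    import math
    import random

    for _ in range(100):
        x, y = random.randrange(1000), random.randrange(1000)
        assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)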
16
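Both functions above compute Euclid's algorithm; behind the obfuscated names the loop body is `a, b = b, a % b`. A short deobfuscated sketch, with names assumed from the print statements in the file, confirming the two forms agree:

def euclidean_gcd(a: int, b: int) -> int:
    # iterative form: shrink (a, b) until the remainder vanishes
    while b:
        a, b = b, a % b
    return a

def euclidean_gcd_recursive(a: int, b: int) -> int:
    # recursive form of the same reduction
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)

assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6
assert euclidean_gcd(7, 0) == 7  # gcd(n, 0) == n by convention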
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_UpperCamelCase = {
    '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
    '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
    '''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
    '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Wav2Vec2ForAudioFrameClassification''',
        '''Wav2Vec2ForCTC''',
        '''Wav2Vec2ForMaskedLM''',
        '''Wav2Vec2ForPreTraining''',
        '''Wav2Vec2ForSequenceClassification''',
        '''Wav2Vec2ForXVector''',
        '''Wav2Vec2Model''',
        '''Wav2Vec2PreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWav2Vec2ForCTC''',
        '''TFWav2Vec2Model''',
        '''TFWav2Vec2PreTrainedModel''',
        '''TFWav2Vec2ForSequenceClassification''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''FlaxWav2Vec2ForCTC''',
        '''FlaxWav2Vec2ForPreTraining''',
        '''FlaxWav2Vec2Model''',
        '''FlaxWav2Vec2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
    from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
    from .processing_wavaveca import WavaVecaProcessor
    from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavaveca import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavaVecaForAudioFrameClassification,
            WavaVecaForCTC,
            WavaVecaForMaskedLM,
            WavaVecaForPreTraining,
            WavaVecaForSequenceClassification,
            WavaVecaForXVector,
            WavaVecaModel,
            WavaVecaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wavaveca import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWavaVecaForCTC,
            TFWavaVecaForSequenceClassification,
            TFWavaVecaModel,
            TFWavaVecaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wavaveca import (
            FlaxWavaVecaForCTC,
            FlaxWavaVecaForPreTraining,
            FlaxWavaVecaModel,
            FlaxWavaVecaPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
16
1
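The file above fills `_import_structure` and hands it to `_LazyModule`, so the heavy torch/TF/flax backends are imported only on first attribute access. A toy sketch of that lazy-attribute pattern, not the actual `_LazyModule` implementation, with stdlib modules standing in for the backends:

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to their real modules only on first access."""

    def __init__(self, name, symbol_to_module):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module

    def __getattr__(self, name):
        target = self._symbol_to_module.get(name)
        if target is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        value = getattr(importlib.import_module(target), name)
        setattr(self, name, value)  # cache so __getattr__ isn't hit again
        return value

# demo: stdlib modules play the role of heavy optional backends
lazy = LazyModule("demo", {"sqrt": "math", "dumps": "json"})
assert lazy.sqrt(9) == 3.0  # 'math' is imported only here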
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : List[str]=False ): """simple docstring""" __UpperCAmelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') ) # embeddings rename_keys.extend( [ # text embeddings ("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""), ( """text_embeddings.position_embeddings.weight""", """vilt.embeddings.text_embeddings.position_embeddings.weight""", ), ("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""), ( """text_embeddings.token_type_embeddings.weight""", """vilt.embeddings.text_embeddings.token_type_embeddings.weight""", ), ("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""), ("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""), # patch embeddings ("""transformer.cls_token""", """vilt.embeddings.cls_token"""), ("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""), ("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""), ("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""), # token type embeddings ("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""), ] ) # final layernorm + pooler rename_keys.extend( [ ("""transformer.norm.weight""", """vilt.layernorm.weight"""), ("""transformer.norm.bias""", """vilt.layernorm.bias"""), ("""pooler.dense.weight""", """vilt.pooler.dense.weight"""), ("""pooler.dense.bias""", """vilt.pooler.dense.bias"""), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ("""vqa_classifier.0.weight""", """classifier.0.weight"""), 
("""vqa_classifier.0.bias""", """classifier.0.bias"""), ("""vqa_classifier.1.weight""", """classifier.1.weight"""), ("""vqa_classifier.1.bias""", """classifier.1.bias"""), ("""vqa_classifier.3.weight""", """classifier.3.weight"""), ("""vqa_classifier.3.bias""", """classifier.3.bias"""), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ("""nlvr2_classifier.0.weight""", """classifier.0.weight"""), ("""nlvr2_classifier.0.bias""", """classifier.0.bias"""), ("""nlvr2_classifier.1.weight""", """classifier.1.weight"""), ("""nlvr2_classifier.1.bias""", """classifier.1.bias"""), ("""nlvr2_classifier.3.weight""", """classifier.3.weight"""), ("""nlvr2_classifier.3.bias""", """classifier.3.bias"""), ] ) else: pass return rename_keys def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" for i in range(config.num_hidden_layers ): __UpperCAmelCase : Optional[int] = """vilt.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __UpperCAmelCase : Tuple = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' ) __UpperCAmelCase : List[str] = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] __UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] __UpperCAmelCase : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __UpperCAmelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] __UpperCAmelCase : List[str] = in_proj_bias[-config.hidden_size :] def lowercase_ ( lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : int = dct.pop(lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = val @torch.no_grad() def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Any = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : str = False __UpperCAmelCase : str = False __UpperCAmelCase : Union[str, Any] = False if "vqa" in checkpoint_url: __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Optional[Any] = 3129 __UpperCAmelCase : Tuple = """huggingface/label-files""" __UpperCAmelCase : int = """vqa2-id2label.json""" __UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : Union[str, Any] = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[Any] = idalabel __UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()} __UpperCAmelCase : str = ViltForQuestionAnswering(lowerCAmelCase__ ) elif "nlvr" in checkpoint_url: __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = 2 __UpperCAmelCase : Union[str, Any] = {0: """False""", 1: """True"""} __UpperCAmelCase : Optional[Any] = {v: k for k, v in config.idalabel.items()} __UpperCAmelCase : str = 3 __UpperCAmelCase : List[Any] = ViltForImagesAndTextClassification(lowerCAmelCase__ ) elif 
"irtr" in checkpoint_url: __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = ViltForImageAndTextRetrieval(lowerCAmelCase__ ) elif "mlm_itm" in checkpoint_url: __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Optional[Any] = ViltForMaskedLM(lowerCAmelCase__ ) else: raise ValueError("""Unknown model type""" ) # load state_dict of original model, remove and rename some keys __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )["""state_dict"""] __UpperCAmelCase : Dict = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ ) if mlm_model or irtr_model: __UpperCAmelCase : List[str] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) # load state dict into HuggingFace model model.eval() if mlm_model: __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowerCAmelCase__ ) # Define processor __UpperCAmelCase : Dict = ViltImageProcessor(size=384 ) __UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __UpperCAmelCase : Tuple = ViltProcessor(lowerCAmelCase__ , lowerCAmelCase__ ) # Forward pass on example inputs (image + text) if nlvr_model: __UpperCAmelCase : Optional[int] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowerCAmelCase__ ).raw ) __UpperCAmelCase : Any = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowerCAmelCase__ ).raw ) __UpperCAmelCase : Tuple = ( """The left image contains twice the number of dogs as the right image, and at least two dogs in total are""" """ standing.""" ) __UpperCAmelCase : Any = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""" ) __UpperCAmelCase : List[str] = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""" ) __UpperCAmelCase : Union[str, Any] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: __UpperCAmelCase : Optional[int] = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=lowerCAmelCase__ ).raw ) if mlm_model: __UpperCAmelCase : str = """a bunch of [MASK] laying on a [MASK].""" else: __UpperCAmelCase : str = """How many cats are there?""" __UpperCAmelCase : List[Any] = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = model(**lowerCAmelCase__ ) # Verify outputs if mlm_model: __UpperCAmelCase : List[Any] = torch.Size([1, 11, 30522] ) __UpperCAmelCase : List[str] = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) # verify masked token prediction equals "cats" __UpperCAmelCase : Dict = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: __UpperCAmelCase : int = torch.Size([1, 3129] ) __UpperCAmelCase : Tuple = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) assert outputs.logits.shape == 
expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) # verify vqa prediction equals "2" __UpperCAmelCase : Optional[int] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: __UpperCAmelCase : Optional[int] = torch.Size([1, 2] ) __UpperCAmelCase : Optional[int] = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
16
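The `read_in_q_k_v` step in the script above slices a fused attention projection into query/key/value thirds. A shape-only sketch of that split, with a random tensor standing in for the real checkpoint weight:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * hidden_size)

# same slicing scheme as the conversion script: first, middle, last third
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b, k_b, v_b = in_proj_bias[:hidden_size], in_proj_bias[hidden_size : hidden_size * 2], in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
# round-trip: concatenating the slices reproduces the fused tensor
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)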
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
16
1
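For reference, a bare-bones usage sketch of the pipeline these tests exercise, with the same tiny test checkpoint; since its weights are random, the printed scores are meaningless outside the assertions above:

from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for pred in unmasker("My name is <mask>"):
    # each prediction carries: sequence, score, token (id), token_str
    print(pred["token_str"], round(pred["score"], 6))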
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def lowercase_ ( lowerCAmelCase__ : int ):
    """simple docstring"""

    def is_in_circle(lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> bool:
        __UpperCAmelCase : Optional[int] = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    __UpperCAmelCase : Optional[Any] = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(lowerCAmelCase__ )
    )
    # The ratio of the area for circle to square is pi/4.
    __UpperCAmelCase : Union[str, Any] = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}' )
    print(f'The numpy value of pi is {pi}' )
    print(f'The total error is {abs(pi - pi_estimate )}' )


def lowercase_ (
    lowerCAmelCase__ : int ,
    lowerCAmelCase__ : Callable[[float], float] ,
    lowerCAmelCase__ : float = 0.0 ,
    lowerCAmelCase__ : float = 1.0 ,
):
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(lowerCAmelCase__ , lowerCAmelCase__ ) ) for _ in range(lowerCAmelCase__ )
    ) * (max_value - min_value)


def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : float = 1.0 ):
    """simple docstring"""

    def identity_function(lowerCAmelCase__ : float ) -> float:
        return x

    __UpperCAmelCase : int = area_under_curve_estimator(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    __UpperCAmelCase : Optional[Any] = (max_value * max_value - min_value * min_value) / 2

    print("""******************""" )
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}' )
    print(f'Estimated value is {estimated_value}' )
    print(f'Expected value is {expected_value}' )
    print(f'Total error is {abs(estimated_value - expected_value )}' )
    print("""******************""" )


def lowercase_ ( lowerCAmelCase__ : int ):
    """simple docstring"""

    def function_to_integrate(lowerCAmelCase__ : float ) -> float:
        return sqrt(4.0 - x * x )

    __UpperCAmelCase : List[str] = area_under_curve_estimator(
        lowerCAmelCase__ , lowerCAmelCase__ , 0.0 , 2.0 )
    print("""******************""" )
    print("""Estimating pi using area_under_curve_estimator""" )
    print(f'Estimated value is {estimated_value}' )
    print(f'Expected value is {pi}' )
    print(f'Total error is {abs(estimated_value - pi )}' )
    print("""******************""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
16
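The dart-throwing estimator above can be restated compactly as a standalone function; the name `estimate_pi` is ours, and the error shrinks roughly like 1/sqrt(n):

from random import uniform

def estimate_pi(samples: int) -> float:
    # count darts landing inside the unit circle inscribed in [-1, 1]^2
    inside = sum(
        1 for _ in range(samples)
        if uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
    )
    return 4.0 * inside / samples  # circle/square area ratio is pi/4

for n in (1_000, 100_000):
    print(n, estimate_pi(n))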
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _A ( __SCREAMING_SNAKE_CASE ):
    _SCREAMING_SNAKE_CASE : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"image": Image()} )
    _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"labels": ClassLabel} )
    _SCREAMING_SNAKE_CASE : str = "image"
    _SCREAMING_SNAKE_CASE : str = "labels"

    def __A ( self , __UpperCAmelCase ) -> str:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , __UpperCAmelCase ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        __UpperCAmelCase : int = copy.deepcopy(self )
        __UpperCAmelCase : str = self.label_schema.copy()
        __UpperCAmelCase : Optional[Any] = features[self.label_column]
        __UpperCAmelCase : Optional[int] = label_schema
        return task_template

    @property
    def __A ( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
16
1
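A small illustration of what the template's schema check and column mapping amount to, assuming the `datasets` library; the label names are invented for the example:

from datasets import ClassLabel, Features, Image

# the schema check above requires the label column to exist and be a ClassLabel
features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
assert "labels" in features and isinstance(features["labels"], ClassLabel)

# the column_mapping property then standardizes column names for the task
column_mapping = {"image": "image", "labels": "labels"}
print(column_mapping, features["labels"].names)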
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
16
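A minimal usage sketch of the zero-shot object detection pipeline under test, using the tiny checkpoint named in the tests (its boxes and scores are meaningless outside the fixtures):

from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection",
    model="hf-internal-testing/tiny-random-owlvit-object-detection",
)
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.0,
)
for p in preds[:3]:
    # each detection carries: score, label, and a box dict (xmin/ymin/xmax/ymax)
    print(p["label"], p["score"], p["box"])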
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
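# A minimal sketch (not part of the suite above) of the regression-check pattern the
# skipped integration tests follow: a loose-tolerance check on the per-position mean of
# the logits plus a tight-tolerance check on one exact logit slice. The helper name is an
# assumption; `logits`, `expected_mean` and `expected_slice` stand in for real tensors.
def _check_logits_regression(logits, expected_mean, expected_slice):
    import torch

    # Loose tolerance on the mean: robust to small numerical drift across hardware.
    torch.testing.assert_close(logits.mean(-1) , expected_mean , atol=1E-2 , rtol=1E-2 )
    # Tight tolerance on a fixed slice: catches genuine numerical regressions.
    torch.testing.assert_close(logits[0, 0, : expected_slice.numel()] , expected_slice , atol=1E-5 , rtol=1E-5 )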
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _UpperCamelCase = logging.getLogger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase=-1 ) -> List[str]: '''simple docstring''' # in NER datasets, the last column is usually reserved for NER label __UpperCAmelCase : Optional[Any] = label_idx def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[InputExample]: '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = mode.value __UpperCAmelCase : List[Any] = os.path.join(__UpperCAmelCase , f'{mode}.txt' ) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Tuple = [] with open(__UpperCAmelCase , encoding="""utf-8""" ) as f: __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Dict = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 __UpperCAmelCase : str = [] __UpperCAmelCase : Any = [] else: __UpperCAmelCase : int = line.split(""" """ ) words.append(splits[0] ) if len(__UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) return examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(__UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __UpperCAmelCase : Tuple = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(__UpperCAmelCase ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' if path: with open(__UpperCAmelCase , """r""" ) as f: __UpperCAmelCase : Any = f.read().splitlines() if "O" not in labels: __UpperCAmelCase : Tuple = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self ) -> Any: '''simple docstring''' # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' if path: with open(__UpperCAmelCase , """r""" ) as f: __UpperCAmelCase : List[Any] = f.read().splitlines() if "O" not in labels: __UpperCAmelCase : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _A ( __SCREAMING_SNAKE_CASE ): def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[InputExample]: '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : Optional[Any] = mode.value __UpperCAmelCase : Union[str, Any] = os.path.join(__UpperCAmelCase , f'{mode}.txt' ) __UpperCAmelCase : int = 1 __UpperCAmelCase : str = [] with 
open(__UpperCAmelCase , encoding="""utf-8""" ) as f: for sentence in parse_incr(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : List[str] = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 return examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[Any] = 0 for sentence in parse_incr(__UpperCAmelCase ): __UpperCAmelCase : Any = preds_list[example_id] __UpperCAmelCase : Optional[int] = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(__UpperCAmelCase ) example_id += 1 def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' if path: with open(__UpperCAmelCase , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
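# A compact, self-contained sketch of the CoNLL-style parsing loop implemented by the
# first reader above: sentences are separated by blank lines or -DOCSTART- markers,
# tokens are whitespace-split columns, and the label lives in a configurable column
# (label_idx, e.g. -1 for NER, -2 for chunking). The function name is an assumption.
def _parse_conll(lines, label_idx=-1):
    sentences, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            # Test files may carry no label column; fall back to "O" as the reader does.
            labels.append(splits[label_idx].replace("\n", "") if len(splits) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences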
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": __UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , sep="""\t""" , header=lowerCAmelCase__ ) for answer_list in data[1]: __UpperCAmelCase : Optional[int] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : str = [[reference] for reference in references] __UpperCAmelCase : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase : int = 100.0 * em / total __UpperCAmelCase : Dict = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = args.k __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : List[str] = set(hypo.split("""\t""" )[:k] ) __UpperCAmelCase : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __UpperCAmelCase : List[str] = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): """simple docstring""" def strip_title(lowerCAmelCase__ : Optional[int] ): if title.startswith("""\"""" ): __UpperCAmelCase : List[Any] = title[1:] if title.endswith("""\"""" ): __UpperCAmelCase : int = title[:-1] return title __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )["""input_ids"""].to(args.device ) __UpperCAmelCase : str = 
rag_model.rag.question_encoder(lowerCAmelCase__ ) __UpperCAmelCase : int = question_enc_outputs[0] __UpperCAmelCase : Dict = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __UpperCAmelCase : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __UpperCAmelCase : Union[str, Any] = [] for docs in all_docs: __UpperCAmelCase : int = [strip_title(lowerCAmelCase__ ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(lowerCAmelCase__ ) ) return provenance_strings def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ): """simple docstring""" with torch.no_grad(): __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = inputs_dict.input_ids.to(args.device ) __UpperCAmelCase : List[Any] = inputs_dict.attention_mask.to(args.device ) __UpperCAmelCase : List[str] = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __UpperCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info("""Q: {} - A: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold 
samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if args.model_type is None: __UpperCAmelCase : str = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __UpperCAmelCase : Dict = args.n_docs if args.index_name is not None: __UpperCAmelCase : Union[str, Any] = args.index_name if args.index_path is not None: __UpperCAmelCase : Dict = args.index_path else: __UpperCAmelCase : str = BartForConditionalGeneration __UpperCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Optional[int] = 
RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __UpperCAmelCase : Union[str, Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" ) preds_file.flush() __UpperCAmelCase : List[str] = [] if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _UpperCamelCase = get_args() main(args)
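# An isolated sketch of the precision@k computation performed by the retrieval-mode
# scorer above: for each hypothesis keep the top-k tab-separated provenance titles and
# average their overlap with the reference set. The function name is an assumption.
def _precision_at_k(hypos, references, k):
    total, overlap = 0, 0.0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        overlap += len(hypo_provenance & ref_provenance) / k
    return 100.0 * overlap / total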
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" if isinstance(lowerCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_tf class _A : def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' pass def __A ( self ) -> List[str]: '''simple docstring''' pass def __A ( self ) -> Any: '''simple docstring''' pass def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[str] = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : str = {"""vision_model""": vision_model, """text_model""": text_model} __UpperCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Dict = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) __UpperCAmelCase : Dict = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = after_output[0].numpy() __UpperCAmelCase : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1E-5 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase ) __UpperCAmelCase : Any = model( input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase ) __UpperCAmelCase : Tuple = output.vision_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCAmelCase : Any = to_atuple(vision_model.config.image_size ) __UpperCAmelCase : List[str] = to_atuple(vision_model.config.patch_size ) __UpperCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __UpperCAmelCase : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __UpperCAmelCase : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = np.abs((a - b) ).max() self.assertLessEqual(__UpperCAmelCase , __UpperCAmelCase , f'Difference between torch and flax is {diff} (>= {tol}).' 
) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**__UpperCAmelCase ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__UpperCAmelCase ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = self.prepare_config_and_inputs() self.check_save_load(**__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__UpperCAmelCase ) @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.get_pretrained_model_and_inputs() __UpperCAmelCase : List[Any] = model_a(**__UpperCAmelCase ) __UpperCAmelCase : str = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__UpperCAmelCase ) __UpperCAmelCase : List[str] = TFVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) __UpperCAmelCase : List[str] = model_a(**__UpperCAmelCase ) __UpperCAmelCase : Dict = after_outputs[0].numpy() __UpperCAmelCase : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1E-5 ) @require_tf class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __UpperCAmelCase : Any = 13 __UpperCAmelCase : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase : Optional[Any] = random_attention_mask([batch_size, 4] ) __UpperCAmelCase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = TFViTModel(__UpperCAmelCase , name="""vision_model""" ) __UpperCAmelCase : Any = TFBertModel(__UpperCAmelCase , name="""text_model""" ) return vision_model, text_model def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = TFViTModelTester(self ) __UpperCAmelCase : Dict = TFBertModelTester(self ) __UpperCAmelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs() __UpperCAmelCase : str = bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : int = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _A ( 
__SCREAMING_SNAKE_CASE , unittest.TestCase ): def __A ( self ) -> str: '''simple docstring''' # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. __UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __UpperCAmelCase : Optional[int] = 13 __UpperCAmelCase : Union[str, Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase : Union[str, Any] = random_attention_mask([batch_size, 4] ) __UpperCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[str] = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model( input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __UpperCAmelCase : str = to_atuple(vision_model.config.image_size ) __UpperCAmelCase : List[str] = to_atuple(vision_model.config.patch_size ) __UpperCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __UpperCAmelCase : Optional[int] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __UpperCAmelCase : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = TFDeiTModel(__UpperCAmelCase , name="""vision_model""" ) __UpperCAmelCase : Optional[Any] = TFRobertaModel(__UpperCAmelCase , name="""text_model""" ) return vision_model, text_model def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = TFDeiTModelTester(self ) __UpperCAmelCase : Any = TFRobertaModelTester(self ) __UpperCAmelCase : Tuple = vit_model_tester.prepare_config_and_inputs() __UpperCAmelCase : List[Any] = bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, 
"text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) __UpperCAmelCase : Tuple = 13 __UpperCAmelCase : Dict = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase : Tuple = random_attention_mask([batch_size, 4] ) __UpperCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : str = TFCLIPVisionModel(__UpperCAmelCase , name="""vision_model""" ) __UpperCAmelCase : Optional[Any] = TFBertModel(__UpperCAmelCase , name="""text_model""" ) return vision_model, text_model def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFCLIPVisionModelTester(self ) __UpperCAmelCase : Optional[Any] = TFBertModelTester(self ) __UpperCAmelCase : List[Any] = clip_model_tester.prepare_config_and_inputs() __UpperCAmelCase : List[Any] = bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : int = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class _A ( unittest.TestCase ): @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : str = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__UpperCAmelCase ) __UpperCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : str = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : List[Any] = model(**__UpperCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __UpperCAmelCase : Optional[int] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __UpperCAmelCase , atol=1E-3 ) )
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_UpperCamelCase = {
    '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GraphormerForGraphClassification''',
        '''GraphormerModel''',
        '''GraphormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
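# The block above implements the library's lazy-import pattern: `_import_structure` maps
# each submodule to the names it exports, and `_LazyModule` defers importing a submodule
# until one of those names is first accessed, for example:
#
#     from transformers import GraphormerConfig  # only now is configuration_graphormer loaded
#
# The TYPE_CHECKING branch does the real imports for static type checkers and IDEs, while
# the is_torch_available() guard keeps the package importable when PyTorch is missing.
# (Note: the renaming pass above assigns the dict to `_UpperCamelCase`, so the
# `_import_structure` name passed to `_LazyModule` reflects the original, pre-renaming source.)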
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {'''vocab_file''': '''vocab.txt'''}

_UpperCamelCase = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}

_UpperCamelCase = {
    '''YituTech/conv-bert-base''': 512,
    '''YituTech/conv-bert-medium-small''': 512,
    '''YituTech/conv-bert-small''': 512,
}

_UpperCamelCase = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}


class _A ( __SCREAMING_SNAKE_CASE ):
    _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
    _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer

    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(
            __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
        __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars
        ):
            __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) )
            __UpperCAmelCase : Union[str, Any] = do_lower_case
            __UpperCAmelCase : str = strip_accents
            __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars
            __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase )
        __UpperCAmelCase : List[Any] = do_lower_case

    def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]:
        '''simple docstring'''
        __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
        '''simple docstring'''
        __UpperCAmelCase : Optional[int] = [self.sep_token_id]
        __UpperCAmelCase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
        '''simple docstring'''
        __UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
        return tuple(__UpperCAmelCase )
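# A sketch of the BERT-style layouts the two sequence-preparation methods above are meant
# to produce (the method names are obfuscated to __A here; in the usual tokenizer API they
# correspond to build_inputs_with_special_tokens and create_token_type_ids_from_sequences):
#
#     single sequence A:   [CLS] A [SEP]          token_type_ids: 0 ... 0
#     sequence pair A, B:  [CLS] A [SEP] B [SEP]  token_type_ids: 0 ... 0 1 ... 1
#
# The renaming pass collapsed the distinct `token_ids_0` / `token_ids_1` parameters, which
# is why the bodies above reference names that no longer match their signatures.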
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor _UpperCamelCase = random.Random() def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str]=1.0 , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=None ): """simple docstring""" if rng is None: __UpperCAmelCase : Dict = global_rng __UpperCAmelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _A ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=400 , __UpperCAmelCase=2_000 , __UpperCAmelCase=24 , __UpperCAmelCase=24 , __UpperCAmelCase=0.0 , __UpperCAmelCase=16_000 , __UpperCAmelCase=True , __UpperCAmelCase=True , ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Dict = batch_size __UpperCAmelCase : List[str] = min_seq_length __UpperCAmelCase : str = max_seq_length __UpperCAmelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __UpperCAmelCase : Optional[Any] = feature_size __UpperCAmelCase : Tuple = num_mel_bins __UpperCAmelCase : Union[str, Any] = padding_value __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : Tuple = return_attention_mask __UpperCAmelCase : List[Any] = do_normalize def __A ( self ) -> int: '''simple docstring''' return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __A ( self , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> int: '''simple docstring''' def _flatten(__UpperCAmelCase ): return list(itertools.chain(*__UpperCAmelCase ) ) if equal_length: __UpperCAmelCase : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __UpperCAmelCase : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __UpperCAmelCase : str = [np.asarray(__UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = SpeechaTextFeatureExtractionTester(self ) def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' self.assertTrue(np.all(np.mean(__UpperCAmelCase , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCAmelCase , axis=0 ) - 1 ) < 1E-3 ) ) def __A ( self ) -> List[str]: '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : List[str] 
= [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs] # Test feature size __UpperCAmelCase : Dict = feature_extractor(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input __UpperCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features __UpperCAmelCase : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) # Test batched __UpperCAmelCase : Union[str, Any] = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_features __UpperCAmelCase : List[Any] = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. __UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] __UpperCAmelCase : Dict = np.asarray(__UpperCAmelCase ) __UpperCAmelCase : Any = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_features __UpperCAmelCase : List[Any] = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : Tuple = ["""longest""", """max_length""", """do_not_pad"""] __UpperCAmelCase : Tuple = [None, 16, None] for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : Optional[int] = feature_extractor( __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase ) __UpperCAmelCase : int = inputs.input_features __UpperCAmelCase : int = inputs.attention_mask __UpperCAmelCase : str = [np.sum(__UpperCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : int = ["""longest""", """max_length""", """do_not_pad"""] __UpperCAmelCase : str = [None, 16, None] for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : List[str] = feature_extractor( __UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" , return_attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Tuple = inputs.input_features __UpperCAmelCase : str = inputs.attention_mask __UpperCAmelCase : Any = [np.sum(__UpperCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) 
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : Any = feature_extractor( __UpperCAmelCase , padding="""max_length""" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="""np""" , return_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = inputs.input_features __UpperCAmelCase : Any = inputs.attention_mask __UpperCAmelCase : str = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : Dict = feature_extractor( __UpperCAmelCase , padding="""longest""" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="""np""" , return_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = inputs.input_features __UpperCAmelCase : List[str] = inputs.attention_mask __UpperCAmelCase : str = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) __UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __UpperCAmelCase : Any = feature_extractor( __UpperCAmelCase , padding="""longest""" , max_length=16 , truncation=__UpperCAmelCase , return_tensors="""np""" , return_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : List[str] = inputs.input_features __UpperCAmelCase : Tuple = inputs.attention_mask __UpperCAmelCase : List[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def __A ( self ) -> Any: '''simple docstring''' import torch __UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : List[str] = np.random.rand(100 , 32 ).astype(np.floataa ) __UpperCAmelCase : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __UpperCAmelCase : Any = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __UpperCAmelCase : List[str] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) 
self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def __A ( self , __UpperCAmelCase ) -> int: '''simple docstring''' from datasets import load_dataset __UpperCAmelCase : Optional[int] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __UpperCAmelCase : List[Any] = ds.sort("""id""" ).select(range(__UpperCAmelCase ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def __A ( self ) -> Tuple: '''simple docstring''' # fmt: off __UpperCAmelCase : Any = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on __UpperCAmelCase : Tuple = self._load_datasamples(1 ) __UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCAmelCase : Union[str, Any] = feature_extractor(__UpperCAmelCase , return_tensors="""pt""" ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , __UpperCAmelCase , atol=1E-4 ) )
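# A minimal, self-contained sketch of the zero-mean / unit-variance check used by
# `_check_zero_mean_unit_variance` in the tests above. It assumes the feature
# extractor's `do_normalize` standardizes each utterance over the time axis; the
# helper below is illustrative, not the library's implementation.
import numpy as np

def normalize_features(features: np.ndarray) -> np.ndarray:
    # Standardize along the time axis (axis 0): subtract mean, divide by std.
    mean = features.mean(axis=0)
    std = features.std(axis=0) + 1e-8  # epsilon guards against division by zero
    return (features - mean) / std

if __name__ == "__main__":
    feats = np.random.rand(584, 24).astype(np.float32)  # (frames, mel bins)
    normed = normalize_features(feats)
    assert np.all(np.abs(normed.mean(axis=0)) < 1e-3)
    assert np.all(np.abs(normed.var(axis=0) - 1) < 1e-2)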
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''OwlViTFeatureExtractor'''] _UpperCamelCase = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
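# The module above wires OwlViT through `_LazyModule` so heavy submodules are only
# imported on first attribute access. A minimal sketch of the same idea using PEP 562
# module-level `__getattr__` (illustrative only; transformers' `_LazyModule` is a
# module subclass with more bookkeeping, and the stdlib `json` mapping below is a
# hypothetical stand-in for real submodules):
import importlib

_import_structure = {"json": ["loads", "dumps"]}  # submodule -> exported symbols
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}

def __getattr__(name):
    module_name = _symbol_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(module_name)  # the import happens only now
    value = getattr(module, name)
    globals()[name] = value  # cache so later lookups bypass __getattr__
    return value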
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : Dict = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1E-5, """token_type_vocab_size""": 2, } __UpperCAmelCase : Union[str, Any] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __UpperCAmelCase : List[Any] = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCAmelCase__ , output_all_encodings=lowerCAmelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCAmelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __UpperCAmelCase : Any = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab __UpperCAmelCase : List[str] = os.path.join(get_home_dir() , """models""" ) __UpperCAmelCase : Union[str, Any] = _load_vocab(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = nlp.model.BERTModel( lowerCAmelCase__ , len(lowerCAmelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCAmelCase__ , use_token_type_embed=lowerCAmelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCAmelCase__ , use_decoder=lowerCAmelCase__ , ) original_bort.load_parameters(lowerCAmelCase__ , cast_dtype=lowerCAmelCase__ , ignore_extra=lowerCAmelCase__ ) __UpperCAmelCase : Union[str, Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 __UpperCAmelCase : str = { """architectures""": ["""BertForMaskedLM"""], 
"""attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCAmelCase__ ), } __UpperCAmelCase : Tuple = BertConfig.from_dict(lowerCAmelCase__ ) __UpperCAmelCase : str = BertForMaskedLM(lowerCAmelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCAmelCase__ : List[str] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ): __UpperCAmelCase : Union[str, Any] = hf_param.shape __UpperCAmelCase : Optional[int] = to_torch(params[gluon_param] ) __UpperCAmelCase : List[Any] = gluon_param.shape 
assert ( shape_hf == shape_gluon ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __UpperCAmelCase : List[str] = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) __UpperCAmelCase : Any = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) __UpperCAmelCase : List[str] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) __UpperCAmelCase : List[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __UpperCAmelCase : List[Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __UpperCAmelCase : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __UpperCAmelCase : BertSelfAttention = layer.attention.self __UpperCAmelCase : Any = check_and_map_params( self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __UpperCAmelCase : Any = check_and_map_params( self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __UpperCAmelCase : Optional[int] = check_and_map_params( self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __UpperCAmelCase : Dict = check_and_map_params( self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __UpperCAmelCase : Optional[Any] = check_and_map_params( self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __UpperCAmelCase : List[Any] = check_and_map_params( self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __UpperCAmelCase : BertSelfOutput = layer.attention.output __UpperCAmelCase : str = check_and_map_params( self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' ) __UpperCAmelCase : List[str] = check_and_map_params( self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' ) __UpperCAmelCase : Optional[int] = check_and_map_params( self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' ) __UpperCAmelCase : str = check_and_map_params( self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __UpperCAmelCase : BertIntermediate = layer.intermediate __UpperCAmelCase : Union[str, Any] = check_and_map_params( intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __UpperCAmelCase : List[str] = check_and_map_params( intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __UpperCAmelCase : BertOutput = layer.output __UpperCAmelCase : List[Any] = check_and_map_params( bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __UpperCAmelCase : Any = check_and_map_params( bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __UpperCAmelCase : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __UpperCAmelCase : Tuple = check_and_map_params( bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output 
of both models __UpperCAmelCase : List[str] = RobertaTokenizer.from_pretrained("""roberta-base""" ) __UpperCAmelCase : Dict = tokenizer.encode_plus(lowerCAmelCase__ )["""input_ids"""] # Get gluon output __UpperCAmelCase : int = mx.nd.array([input_ids] ) __UpperCAmelCase : int = original_bort(inputs=lowerCAmelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = BertModel.from_pretrained(lowerCAmelCase__ ) hf_bort_model.eval() __UpperCAmelCase : Optional[int] = tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="""pt""" ) __UpperCAmelCase : Dict = hf_bort_model(**lowerCAmelCase__ )[0] __UpperCAmelCase : int = output_gluon[0].asnumpy() __UpperCAmelCase : str = output_hf[0].detach().numpy() __UpperCAmelCase : Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item() __UpperCAmelCase : Any = np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCAmelCase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _UpperCamelCase = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
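# The conversion script above validates the port by running the same sentence through
# both models and comparing activations. A compact sketch of that final check,
# assuming both outputs have already been converted to NumPy arrays:
import numpy as np

def outputs_match(a: np.ndarray, b: np.ndarray, atol: float = 1e-3) -> bool:
    # Report the worst-case element-wise deviation before the pass/fail verdict.
    max_abs_diff = np.max(np.abs(a - b)).item()
    print(f"Absolute difference is: {max_abs_diff}")
    return np.allclose(a, b, atol=atol)

if __name__ == "__main__":
    reference = np.random.rand(1, 8, 1024).astype(np.float32)
    ported = reference + np.random.uniform(-5e-4, 5e-4, reference.shape).astype(np.float32)
    print("match" if outputs_match(reference, ported) else "mismatch")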
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


_UpperCamelCase = logging.get_logger(__name__)


class _A ( LayoutLMvaImageProcessor ):
    def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" ,
            FutureWarning ,
        )
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
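# The shim above keeps the old feature-extractor name importable while steering
# users to the image-processor class. A generic, self-contained sketch of the same
# deprecation-alias pattern (all names here are hypothetical):
import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

# Instantiating the old name still works, but emits a FutureWarning.
legacy = OldFeatureExtractor(size=112)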
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar('''T''')

_UpperCamelCase = Union[List[T], Tuple[T, ...]]
_UpperCamelCase = Union[T, List[T], Dict[str, T]]
_UpperCamelCase = Union[str, bytes, os.PathLike]
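# Usage sketch for the aliases above. In `datasets` these are plausibly named
# `ListLike`, `NestedDataStructureLike`, and `PathLike` (an assumption here, since
# the dump rebinds them all to one identifier). A function typed against the nested
# alias accepts a bare value, a list of values, or a dict of values:
from typing import Dict, List, Union

NestedStrs = Union[str, List[str], Dict[str, str]]

def first_value(data: NestedStrs) -> str:
    if isinstance(data, str):
        return data
    if isinstance(data, list):
        return data[0]
    return next(iter(data.values()))  # dict case

assert first_value("a") == first_value(["a", "b"]) == first_value({"k": "a"}) == "a"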
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _SCREAMING_SNAKE_CASE : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) __UpperCAmelCase : List[Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : Dict = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) __UpperCAmelCase : str = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' import torch __UpperCAmelCase : Any = pipeline( task="""text-classification""" , 
model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pipeline("""text-classification""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : Any = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : int = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase : Union[str, Any] = """HuggingFace is in""" __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) __UpperCAmelCase : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""] __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase ) 
__UpperCAmelCase : Any = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] , ) __UpperCAmelCase : str = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} __UpperCAmelCase : Optional[int] = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __UpperCAmelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__UpperCAmelCase ): text_classifier(__UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
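# The tests above exercise the text-classification pipeline's output shapes. A short
# usage sketch with the same tiny checkpoint the tests use (downloads the model on
# first call; exact scores will vary):
from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))              # [{'label': ..., 'score': ...}]
print(classifier("This is great !", top_k=None))  # one dict per label, all scores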
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.17.0.dev0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') _UpperCamelCase = logging.getLogger(__name__) @dataclass class _A : _SCREAMING_SNAKE_CASE : Optional[str] = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) _SCREAMING_SNAKE_CASE : int = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _SCREAMING_SNAKE_CASE : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) _SCREAMING_SNAKE_CASE : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the training data."} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the validation data."} ) _SCREAMING_SNAKE_CASE : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the test data."} ) def __A ( self ) -> List[Any]: '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __UpperCAmelCase : int = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
__UpperCAmelCase : Union[str, Any] = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class _A : _SCREAMING_SNAKE_CASE : str = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) _SCREAMING_SNAKE_CASE : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) _SCREAMING_SNAKE_CASE : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) _SCREAMING_SNAKE_CASE : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __UpperCAmelCase : List[Any] = training_args.get_process_log_level() logger.setLevel(lowerCAmelCase__ ) datasets.utils.logging.set_verbosity(lowerCAmelCase__ ) transformers.utils.logging.set_verbosity(lowerCAmelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __UpperCAmelCase : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCAmelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCAmelCase : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __UpperCAmelCase : Union[str, Any] = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __UpperCAmelCase : Union[str, Any] = data_args.train_file.split(""".""" )[-1] __UpperCAmelCase : int = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __UpperCAmelCase : Union[str, Any] = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __UpperCAmelCase : List[str] = load_dataset("""csv""" , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __UpperCAmelCase : Dict = load_dataset("""json""" , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __UpperCAmelCase : Union[str, Any] = raw_datasets["""train"""].features["""label"""].names __UpperCAmelCase : Optional[Any] = len(lowerCAmelCase__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase : str = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __UpperCAmelCase : List[Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase__ , ) __UpperCAmelCase : str = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __UpperCAmelCase : Dict = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __UpperCAmelCase : int = False # Some models have set the order of the labels to use, so let's make sure we do use it. __UpperCAmelCase : Optional[int] = {"""Refused""": 0, """Entailed""": 1} __UpperCAmelCase : Optional[Any] = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __UpperCAmelCase : Any = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowerCAmelCase__ : Dict ): # Tokenize the texts def _convert_table_text_to_pandas(lowerCAmelCase__ : int ): __UpperCAmelCase : Union[str, Any] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __UpperCAmelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __UpperCAmelCase : Tuple = examples["""statement"""] __UpperCAmelCase : Dict = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __UpperCAmelCase : List[str] = tokenizer(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : Union[str, Any] = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __UpperCAmelCase : str = raw_datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __UpperCAmelCase : Optional[Any] = raw_datasets["""train"""] if data_args.max_train_samples is not None: __UpperCAmelCase : Tuple = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __UpperCAmelCase : Tuple = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __UpperCAmelCase : List[str] = eval_dataset.select(range(data_args.max_eval_samples ) ) if 
training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __UpperCAmelCase : Optional[int] = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __UpperCAmelCase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowerCAmelCase__ ) ) , 3 ): logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCAmelCase__ : EvalPrediction ): __UpperCAmelCase : List[str] = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase__ ) else p.predictions __UpperCAmelCase : Optional[int] = np.argmax(lowerCAmelCase__ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __UpperCAmelCase : Union[str, Any] = default_data_collator elif training_args.fpaa: __UpperCAmelCase : Dict = DataCollatorWithPadding(lowerCAmelCase__ , pad_to_multiple_of=8 ) else: __UpperCAmelCase : List[Any] = None # Initialize our Trainer __UpperCAmelCase : Optional[Any] = Trainer( model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , ) # Training if training_args.do_train: __UpperCAmelCase : List[str] = None if training_args.resume_from_checkpoint is not None: __UpperCAmelCase : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCAmelCase : Union[str, Any] = last_checkpoint __UpperCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase__ ) __UpperCAmelCase : str = train_result.metrics __UpperCAmelCase : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ ) ) __UpperCAmelCase : Optional[int] = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowerCAmelCase__ ) trainer.save_metrics("""train""" , lowerCAmelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __UpperCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=lowerCAmelCase__ ) __UpperCAmelCase : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ ) __UpperCAmelCase : int = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) trainer.log_metrics("""eval""" , lowerCAmelCase__ ) trainer.save_metrics("""eval""" , lowerCAmelCase__ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__UpperCAmelCase : str = predict_dataset.remove_columns("""label""" ) __UpperCAmelCase : Union[str, Any] = trainer.predict(lowerCAmelCase__ , metric_key_prefix="""predict""" ).predictions __UpperCAmelCase : Tuple = np.argmax(lowerCAmelCase__ , axis=1 ) __UpperCAmelCase : int = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowerCAmelCase__ , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowerCAmelCase__ ): __UpperCAmelCase : List[str] = label_list[item] writer.write(f'{index}\t{item}\n' ) __UpperCAmelCase : Optional[int] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowerCAmelCase__ ) else: trainer.create_model_card(**lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
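# `_convert_table_text_to_pandas` in the preprocessing function above turns TabFact's
# '#'-delimited table text into a DataFrame before tokenization. A standalone sketch
# of that parsing step:
import pandas as pd

def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # The first row holds column headers; '#' separates cells, '\n' separates rows.
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

example = "year#city\n2008#Beijing\n2012#London"
print(table_text_to_pandas(example))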
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( 
metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = 
["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["image_processor", "tokenizer"] _SCREAMING_SNAKE_CASE : Union[str, Any] = "OwlViTImageProcessor" _SCREAMING_SNAKE_CASE : Tuple = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) __UpperCAmelCase : Dict = kwargs.pop("""feature_extractor""" ) __UpperCAmelCase : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ) -> str: '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): __UpperCAmelCase : List[Any] = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): __UpperCAmelCase : Optional[int] = [] # Maximum number of queries across batch __UpperCAmelCase : Dict = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: __UpperCAmelCase : int = t + [""" """] * (max_num_queries - len(__UpperCAmelCase )) __UpperCAmelCase : Optional[Any] = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": __UpperCAmelCase : List[str] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCAmelCase : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : int = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCAmelCase : List[Any] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) __UpperCAmelCase : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and 
is_tf_available(): import tensorflow as tf __UpperCAmelCase : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __UpperCAmelCase : List[str] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) __UpperCAmelCase : Any = BatchEncoding() __UpperCAmelCase : Tuple = input_ids __UpperCAmelCase : List[Any] = attention_mask if query_images is not None: __UpperCAmelCase : List[str] = BatchEncoding() __UpperCAmelCase : str = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values __UpperCAmelCase : List[Any] = query_pixel_values if images is not None: __UpperCAmelCase : Tuple = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: __UpperCAmelCase : List[Any] = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCAmelCase : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def __A ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCAmelCase , ) return self.image_processor_class @property def __A ( self ) -> int: '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCAmelCase , ) return self.image_processor
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
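# Usage sketch (illustrative addition, not part of the original helpers): the
# regression classes above can be exercised directly, without the MRPC CSV
# files that `get_dataloaders` expects.
if __name__ == "__main__":
    dataset = RegressionDataset(length=8, seed=0)
    model = RegressionModel(a=2, b=3)
    sample = dataset[0]
    # Forward pass on a single scalar input; prints the dtypes on the first batch.
    prediction = model(torch.tensor(sample["x"]))
    print(f"prediction={prediction.item():.3f}, target={sample['y']:.3f}")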
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
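# Sanity-check sketch (illustrative, not part of the original file). Keys from
# `generate_key` should round-trip any message m < n, because d is the modular
# inverse of e modulo (p - 1) * (q - 1). Kept commented out since generating
# two 1024-bit primes is slow:
#
#   public_key, private_key = generate_key(1024)
#   n, e = public_key
#   _, d = private_key
#   message = 42
#   assert pow(pow(message, e, n), d, n) == message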
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = (3, 32, 128) __UpperCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : List[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = 
self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.prepare_image_inputs() __UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Dict = """test""" __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = """test""" __UpperCAmelCase : int = self.prepare_image_inputs() __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase ) __UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() 
__UpperCAmelCase : Optional[Any] = self.get_tokenizer() __UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 ) __UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 ) __UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 ) __UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursively compute (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
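# Quick check (illustrative, not part of the original file): `_modexpt` should
# agree with Python's built-in three-argument `pow`, which also computes
# modular exponentiation:
#
#   assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)
#
# `solution()` then answers Project Euler 188: the last `digits` digits of the
# hyperexponentiation (tetration) of 1777 by 1855.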
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum of a (not necessarily contiguous) subsequence of `nums`."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best sum so far, extend it with `num`, or restart at `num`.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
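# Worked example (illustrative, not part of the original file): since the
# subsequence need not be contiguous, the best choice is simply every positive
# element (or the largest element when all are negative):
#
#   max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12   # 1 + 4 + 2 + 1 + 4
#   max_subsequence_sum([-2, -3, -1, -4]) == -1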
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
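# Usage sketch (illustrative, not part of the original file): build a list,
# insert at a position, then delete a value.
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert(value)
#   ll.insert_at_position(position=2, value=99)
#   print(ll)  # 1 99 2 3
#   ll.delete_value(99)
#   print(ll)  # 1 2 3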
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer _UpperCamelCase = ['''bert-base-uncased''', '''bert-base-cased'''] _UpperCamelCase = '''hf-internal-testing/tiny-bert-tf-only''' if is_tf_available(): class _A ( tf.keras.Model ): def __init__( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Dict = tokenizer __UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__UpperCAmelCase ) __UpperCAmelCase : str = TFAutoModel.from_config(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.tokenizer(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = self.bert(**__UpperCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() __UpperCAmelCase : List[Any] = [ BertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __UpperCAmelCase : Optional[int] = [TFBertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__UpperCAmelCase , use_fast_bert_tokenizer=__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __UpperCAmelCase : Any = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] __UpperCAmelCase : Any = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __A ( self ) -> Dict: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __UpperCAmelCase : List[str] = tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding="""longest""" ) __UpperCAmelCase : Optional[Any] = tf_tokenizer(__UpperCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __UpperCAmelCase : Union[str, Any] = tf_tokenizer(self.paired_sentences ) __UpperCAmelCase : List[Any] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def __A ( self ) -> Tuple: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __UpperCAmelCase : Dict = tf.function(__UpperCAmelCase ) for 
test_inputs in (self.test_sentences, self.paired_sentences): __UpperCAmelCase : Tuple = tf.constant(__UpperCAmelCase ) __UpperCAmelCase : Any = compiled_tokenizer(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tf_tokenizer(__UpperCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __A ( self ) -> Any: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __UpperCAmelCase : Optional[int] = ModelToSave(tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[str] = tf.convert_to_tensor(self.test_sentences ) __UpperCAmelCase : str = model(__UpperCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __UpperCAmelCase : Dict = Path(__UpperCAmelCase ) / """saved.model""" model.save(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tf.keras.models.load_model(__UpperCAmelCase ) __UpperCAmelCase : Any = loaded_model(__UpperCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
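# Usage sketch (illustrative, not part of the original file): encoding one
# example that carries several translations for a single language.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    encoded = feature.encode_example(
        {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
    )
    # Pairs are sorted by (language, text), so this prints:
    # {'language': ('de', 'en', 'fr', 'fr'),
    #  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}
    print(encoded)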
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # No fast tokenizer exists for RoCBert; mirror the empty branch of the
        # lazy-import structure above instead of raising unconditionally.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump to the first unfinished process and, if nothing has arrived yet,
        # move the clock forward to its arrival time.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
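# Expected output for the sample run above (hand-computed, so treat as
# illustrative): HRRN schedules A, B, C, D, E in that order here, giving
# turn around times [1, 2, 4, 7, 11] and waiting times [0, 0, 1, 3, 6],
# i.e. an average waiting time of 2.00000 and an average turn around time
# of 5.00000.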
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( 
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , 
hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = 
model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int ): """simple docstring""" for attribute in key.split(""".""" ): __UpperCAmelCase : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: __UpperCAmelCase : Dict = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: __UpperCAmelCase : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __UpperCAmelCase : Tuple = value elif weight_type == "weight_g": __UpperCAmelCase : int = value elif weight_type == "weight_v": __UpperCAmelCase : Dict = value elif weight_type == "bias": __UpperCAmelCase : Union[str, Any] = value else: __UpperCAmelCase : Union[str, Any] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ): """simple docstring""" __UpperCAmelCase : Dict = [] __UpperCAmelCase : Tuple = fairseq_model.state_dict() __UpperCAmelCase : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __UpperCAmelCase : Tuple = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , ) __UpperCAmelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): __UpperCAmelCase : List[str] = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): __UpperCAmelCase : Optional[int] = True if "*" in mapped_key: __UpperCAmelCase : Dict = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2] __UpperCAmelCase : List[Any] = mapped_key.replace("""*""" , lowerCAmelCase__ ) if "weight_g" in name: __UpperCAmelCase : str = """weight_g""" elif "weight_v" in name: __UpperCAmelCase : str = """weight_v""" elif "weight" in name: __UpperCAmelCase : Optional[int] = """weight""" elif "bias" in name: __UpperCAmelCase : Optional[int] = """bias""" else: __UpperCAmelCase : Optional[int] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ): """simple docstring""" __UpperCAmelCase : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __UpperCAmelCase : Dict = name.split(""".""" ) __UpperCAmelCase : Union[str, Any] = int(items[0] ) __UpperCAmelCase : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __UpperCAmelCase : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __UpperCAmelCase : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __UpperCAmelCase : int = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' 
) __UpperCAmelCase : Optional[int] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[str]=True ): """simple docstring""" if config_path is not None: __UpperCAmelCase : Dict = HubertConfig.from_pretrained(lowerCAmelCase__ ) else: __UpperCAmelCase : Dict = HubertConfig() if is_finetuned: if dict_path: __UpperCAmelCase : Optional[Any] = Dictionary.load(lowerCAmelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCAmelCase : List[str] = target_dict.pad_index __UpperCAmelCase : Tuple = target_dict.bos_index __UpperCAmelCase : str = target_dict.eos_index __UpperCAmelCase : int = len(target_dict.symbols ) __UpperCAmelCase : Dict = os.path.join(lowerCAmelCase__ , """vocab.json""" ) if not os.path.isdir(lowerCAmelCase__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase__ ) ) return os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , lowerCAmelCase__ ) __UpperCAmelCase : List[str] = WavaVecaCTCTokenizer( lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase__ , ) __UpperCAmelCase : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) __UpperCAmelCase : int = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) __UpperCAmelCase : Dict = HubertForCTC(lowerCAmelCase__ ) else: __UpperCAmelCase : int = HubertModel(lowerCAmelCase__ ) if is_finetuned: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCAmelCase : List[str] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, 
args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
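The core of the conversion above is the wildcard renaming: each fairseq parameter name is matched against `MAPPING`, and the `*` in the HF-side name is filled with the layer index recovered from the fairseq key. A dependency-free sketch of that logic (the mapping entry is one line taken from the table above):

```python
MAPPING = {"fc1": "encoder.layers.*.feed_forward.intermediate_dense"}

def rename(fairseq_key):
    """Translate a fairseq parameter name into its HF counterpart."""
    for key, mapped_key in MAPPING.items():
        if key in fairseq_key:
            if "*" in mapped_key:
                # take the segment before the matched key; the trailing "."
                # leaves an empty last element, so the layer index is at [-2]
                layer_index = fairseq_key.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None  # unmatched keys end up in unused_weights

print(rename("encoder.layers.3.fc1.weight"))
# -> encoder.layers.3.feed_forward.intermediate_dense
```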
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Any = image_size __UpperCAmelCase : Dict = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : List[Any] = embed_dim __UpperCAmelCase : str = depths __UpperCAmelCase : Dict = num_heads __UpperCAmelCase : str = window_size __UpperCAmelCase : int = mlp_ratio __UpperCAmelCase : Union[str, Any] = qkv_bias __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Any = patch_norm __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = scope __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : Optional[int] = type_sequence_label_size __UpperCAmelCase : int = encoder_stride def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Tuple = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __A ( self ) -> Dict: '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) __UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : str = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = self.type_sequence_label_size __UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs __UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[str] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = SwinvaModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 ) def __A ( self ) -> Any: '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def __A ( self ) -> Dict: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(__UpperCAmelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : str = [*signature.parameters.keys()] __UpperCAmelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : str = outputs.attentions __UpperCAmelCase : Any = len(self.model_tester.depths ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : Dict = True __UpperCAmelCase : int = config.window_size**2 __UpperCAmelCase : Any = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase : Dict = len(__UpperCAmelCase ) # Check attention is always last and order is fine __UpperCAmelCase : Any = True __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase : Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase : Optional[int] = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) ) __UpperCAmelCase : Tuple = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase 
) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : List[Any] = outputs.hidden_states __UpperCAmelCase : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # Swinv2 has a different seq_length __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase : int = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = reshaped_hidden_states[0].shape __UpperCAmelCase : Any = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = 3 __UpperCAmelCase : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase : int = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase 
, __UpperCAmelCase , (padded_height, padded_width) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = SwinvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def __A ( self ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.default_image_processor __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : Any = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
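The shape assertions in `create_and_check_model` above encode how Swin-style backbones trade resolution for channels: every stage after the first merges 2x2 patches, so the token count drops by 4x per stage while the embedding dimension doubles. With the tester's defaults the arithmetic works out as follows:

```python
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]  # tester defaults

num_patches = (image_size // patch_size) ** 2              # 16 * 16 = 256 tokens
num_stages = len(depths)
expected_seq_len = num_patches // (4 ** (num_stages - 1))  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (num_stages - 1)           # 16 * 4  = 64

assert (expected_seq_len, expected_dim) == (16, 64)
```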
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''', } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = "blip_2_vision_model" def __init__( self , __UpperCAmelCase=1_408 , __UpperCAmelCase=6_144 , __UpperCAmelCase=39 , __UpperCAmelCase=16 , __UpperCAmelCase=224 , __UpperCAmelCase=14 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0_0001 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1E-10 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Dict: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Any = intermediate_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = patch_size __UpperCAmelCase : Tuple = image_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Optional[int] = attention_dropout __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : List[Any] = qkv_bias @classmethod def __A ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : int = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": __UpperCAmelCase : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = "blip_2_qformer" def __init__( self , __UpperCAmelCase=30_522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=2 , __UpperCAmelCase=1_408 , **__UpperCAmelCase , ) -> int: '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) __UpperCAmelCase : Dict = vocab_size __UpperCAmelCase : int = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Dict = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = max_position_embeddings __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Dict = layer_norm_eps __UpperCAmelCase : Tuple = position_embedding_type __UpperCAmelCase : List[str] = cross_attention_frequency __UpperCAmelCase : Tuple = encoder_hidden_size @classmethod def __A ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Any = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": __UpperCAmelCase : int = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = "blip-2" _SCREAMING_SNAKE_CASE : str = True def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=32 , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' super().__init__(**__UpperCAmelCase ) if vision_config is None: __UpperCAmelCase : Dict = {} logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" ) if qformer_config is None: __UpperCAmelCase : Dict = {} logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" ) if text_config is None: __UpperCAmelCase : Tuple = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) __UpperCAmelCase : Dict = BlipaVisionConfig(**__UpperCAmelCase ) __UpperCAmelCase : str = BlipaQFormerConfig(**__UpperCAmelCase ) __UpperCAmelCase : Tuple = text_config["""model_type"""] if """model_type""" in text_config else """opt""" __UpperCAmelCase : str = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase ) __UpperCAmelCase : str = self.text_config.tie_word_embeddings __UpperCAmelCase : Any = self.text_config.is_encoder_decoder __UpperCAmelCase : Dict = num_query_tokens __UpperCAmelCase : str = self.vision_config.hidden_size __UpperCAmelCase : Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCAmelCase : List[Any] = 1.0 __UpperCAmelCase : Optional[Any] = 0.02 @classmethod def __A ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCAmelCase , ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : str = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : str = self.vision_config.to_dict() __UpperCAmelCase : Tuple = self.qformer_config.to_dict() __UpperCAmelCase : str = self.text_config.to_dict() __UpperCAmelCase : Tuple = self.__class__.model_type return output
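The composite config above wires three sub-configs together and falls back to defaults for any that are missing. A hedged sketch of building it with the stock transformers classes (the file above uses renamed identifiers such as `BlipaConfig`; upstream these are `Blip2Config`, `Blip2VisionConfig`, and `Blip2QFormerConfig`):

```python
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig

vision_config = Blip2VisionConfig(image_size=224, patch_size=14)
qformer_config = Blip2QFormerConfig(num_hidden_layers=12)

# Passing text_config=None falls back to a default OPT config, mirroring
# the None-handling branches in __init__ above.
config = Blip2Config(
    vision_config=vision_config.to_dict(),
    qformer_config=qformer_config.to_dict(),
    text_config=None,
    num_query_tokens=32,
)
assert config.text_config.model_type == "opt"
```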
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase__ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : int = size if size is not None else {"""shortest_edge""": 256} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : List[str] = size __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : Any = crop_size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Dict = do_rescale __UpperCAmelCase : List[str] = rescale_factor __UpperCAmelCase : Dict = offset __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: __UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : Any = (size["""height"""], size["""width"""]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Any = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = image.astype(np.floataa ) if offset: __UpperCAmelCase : Tuple = image - (scale / 2) return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
__UpperCAmelCase : Optional[Any] = to_numpy_array(__UpperCAmelCase ) if do_resize: __UpperCAmelCase : Optional[int] = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: __UpperCAmelCase : Optional[int] = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: __UpperCAmelCase : int = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , offset=__UpperCAmelCase ) if do_normalize: __UpperCAmelCase : List[str] = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : List[Any] = resample if resample is not None else self.resample __UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[Any] = offset if offset is not None else self.offset __UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : int = image_std if image_std is not None else self.image_std __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : str = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __UpperCAmelCase : int = make_batched(__UpperCAmelCase ) __UpperCAmelCase : Tuple = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , offset=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] __UpperCAmelCase : Tuple = {"""pixel_values""": videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
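The `offset` flag above changes what rescaling produces: without it, uint8 pixels are simply scaled into [0, 1]; with it, the values are additionally shifted so they are centred on zero. A self-contained numpy sketch of that idea (it illustrates the intent of the flag rather than reproducing the method's exact arithmetic):

```python
import numpy as np

frame = np.random.randint(0, 256, size=(3, 224, 224), dtype=np.uint8)  # one video frame

scaled = frame.astype(np.float32) / 255.0  # plain rescale: values in [0, 1]
centred = scaled - 0.5                     # with offset: values in [-0.5, 0.5]

assert 0.0 <= scaled.min() and scaled.max() <= 1.0
assert -0.5 <= centred.min() and centred.max() <= 0.5
```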
'''simple docstring''' import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , **lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) AutoTokenizer.from_pretrained(lowerCAmelCase__ ).save_pretrained(lowerCAmelCase__ ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
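Because the module hands the function to `fire.Fire`, its positional parameters and `**kwargs` become a command line: the first two arguments select the source config and the output directory, and any extra `--flags` are forwarded as config overrides. A hypothetical invocation (script name, checkpoint, path, and override are illustrative, not taken from the source):

```python
#   python save_randomly_initialized_model.py t5-small /tmp/tiny-t5 --d_model=64
#
# which fire translates into the direct call:
#
#   save_randomly_initialized_version("t5-small", "/tmp/tiny-t5", d_model=64)
#
# i.e. load the t5-small config, override d_model, build an untrained
# seq2seq model from that config, and save model + tokenizer to /tmp/tiny-t5.
```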
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Dict = LDMTextToImagePipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } _SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } _SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __UpperCAmelCase : Any = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __UpperCAmelCase : Tuple = CLIPTextModel(__UpperCAmelCase ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __UpperCAmelCase : Dict = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any: '''simple docstring''' if str(__UpperCAmelCase ).startswith("""mps""" ): __UpperCAmelCase : int = torch.manual_seed(__UpperCAmelCase ) else: __UpperCAmelCase : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Dict = self.get_dummy_components() __UpperCAmelCase : Tuple = LDMTextToImagePipeline(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : 
Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __UpperCAmelCase : Dict = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : int = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : int = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __UpperCAmelCase : Tuple = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) __UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = pipe(**__UpperCAmelCase ).images[0] __UpperCAmelCase : Tuple = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) __UpperCAmelCase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
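Outside the test harness, the pipeline exercised above is driven in a few lines; this sketch assumes the same `CompVis/ldm-text2im-large-256` checkpoint the slow tests load and an available CUDA device:

```python
import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda")

generator = torch.manual_seed(0)  # a fixed seed keeps the run reproducible
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=50,
    guidance_scale=6.0,
).images[0]
image.save("squirrel.png")
```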
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''spiece.model'''} _UpperCamelCase = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token __UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) __UpperCAmelCase : int = 3 __UpperCAmelCase : str = do_lower_case __UpperCAmelCase : Optional[int] = remove_space __UpperCAmelCase : List[Any] = keep_accents __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) __UpperCAmelCase : List[str] = jieba __UpperCAmelCase : Any = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def __A ( self ) -> Any: '''simple docstring''' return len(self.sp_model ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : str = self.__dict__.copy() __UpperCAmelCase : Any = None return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase : Union[str, Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' if self.remove_space: __UpperCAmelCase : Optional[int] = """ """.join(inputs.strip().split() ) else: __UpperCAmelCase : str = inputs __UpperCAmelCase : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: __UpperCAmelCase : Optional[int] = unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) __UpperCAmelCase : List[str] = """""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: __UpperCAmelCase : Tuple = outputs.lower() return outputs def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.preprocess_text(__UpperCAmelCase ) __UpperCAmelCase : int = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = [] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): __UpperCAmelCase : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __UpperCAmelCase : Optional[Any] = cur_pieces[1:] else: __UpperCAmelCase : Optional[Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return self.sp_model.PieceToId(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.sp_model.IdToPiece(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Tuple = [self.sep_token_id] __UpperCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , 
already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCAmelCase : List[str] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: __UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase ) __UpperCAmelCase : Any = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
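The tokenizer above protects whitespace from SentencePiece by translating spaces and newlines into the placeholder glyphs U+2582 and U+2583 before encoding, and `_decode` maps them back afterwards. A dependency-free sketch of that round trip (the real tokenizer also runs jieba word segmentation first):

```python
translator = str.maketrans(" \n", "\u2582\u2583")

encoded = "你好 世界\n".translate(translator)  # '你好▂世界▃' — no raw whitespace left
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")

assert decoded == "你好 世界\n"
```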
'''simple docstring'''
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        '''simple docstring'''
        s = f'Matrix consists of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        '''simple docstring'''
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        '''simple docstring'''
        import doctest

        doctest.testmod()

    test1()
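As a sanity check on the method above, a minimal hedged sketch using the Matrix class from this file: with A = I, the Sherman-Morrison identity (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u) can be verified by multiplying the result back against A + uv^T. All names here are for illustration only.

# Hedged numeric check: A = I, so this object (holding A^(-1)) is also I.
def check_sherman_morrison() -> None:
    a = Matrix(3, 3, 0)
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        a[i, i] = 1
        ainv[i, i] = 1
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    result = ainv.sherman_morrison(u, v)
    product = (a + u * v.transpose()) * result  # should be (close to) the identity
    for r in range(3):
        for c in range(3):
            expected = 1.0 if r == c else 0.0
            assert abs(product[r, c] - expected) < 1e-9


check_sherman_morrison()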
16
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : int = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 0.9 , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : List[str] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : Any = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Union[str, Any] = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) __UpperCAmelCase : Dict = do_resize __UpperCAmelCase : int = size __UpperCAmelCase : Optional[int] = crop_pct __UpperCAmelCase : int = resample __UpperCAmelCase : int = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : List[Any] = do_rescale __UpperCAmelCase : int = rescale_factor __UpperCAmelCase : Dict = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCAmelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: __UpperCAmelCase : Tuple = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCAmelCase : List[Any] = int(size["""height"""] / crop_pct ) else: __UpperCAmelCase : str = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(__UpperCAmelCase ) ) __UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase ) else: if "shortest_edge" in size: __UpperCAmelCase : Tuple = get_resize_output_image_size(__UpperCAmelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(__UpperCAmelCase ) ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : int = crop_pct if crop_pct is not None else self.crop_pct __UpperCAmelCase : Tuple = resample if resample is not None else self.resample __UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : Any = 
get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
        __UpperCAmelCase : int = crop_size if crop_size is not None else self.crop_size
        __UpperCAmelCase : Optional[Any] = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" )
        __UpperCAmelCase : List[Any] = make_list_of_images(__UpperCAmelCase )
        if not valid_images(__UpperCAmelCase ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        __UpperCAmelCase : Union[str, Any] = [to_numpy_array(__UpperCAmelCase ) for image in images]
        if do_resize:
            __UpperCAmelCase : int = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
        if do_center_crop:
            __UpperCAmelCase : Union[str, Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
        if do_rescale:
            __UpperCAmelCase : List[str] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
        if do_normalize:
            __UpperCAmelCase : Optional[int] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
        __UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
        __UpperCAmelCase : Dict = {"""pixel_values""": images}
        return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
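A hedged usage sketch of the processor defined above, assuming it corresponds to transformers' PoolFormerImageProcessor (with the default crop_pct=0.9, the short side is first resized to int(224 / 0.9) before the 224x224 center crop):

import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

# A random RGB image standing in for real input; shape and dtype are the only
# things that matter for this sketch.
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
processor = PoolFormerImageProcessor()  # defaults: shortest_edge=224, crop 224x224
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)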
16
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
    '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
    '''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
    '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_wav2vec2'''] = [
        '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Wav2Vec2ForAudioFrameClassification''',
        '''Wav2Vec2ForCTC''',
        '''Wav2Vec2ForMaskedLM''',
        '''Wav2Vec2ForPreTraining''',
        '''Wav2Vec2ForSequenceClassification''',
        '''Wav2Vec2ForXVector''',
        '''Wav2Vec2Model''',
        '''Wav2Vec2PreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_wav2vec2'''] = [
        '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWav2Vec2ForCTC''',
        '''TFWav2Vec2Model''',
        '''TFWav2Vec2PreTrainedModel''',
        '''TFWav2Vec2ForSequenceClassification''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_wav2vec2'''] = [
        '''FlaxWav2Vec2ForCTC''',
        '''FlaxWav2Vec2ForPreTraining''',
        '''FlaxWav2Vec2Model''',
        '''FlaxWav2Vec2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
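A hedged sketch of what the lazy wiring above enables: the names listed in _import_structure resolve to real modules only on first attribute access.

from transformers import Wav2Vec2Config, Wav2Vec2Model  # imported lazily via _LazyModule

config = Wav2Vec2Config()      # default base-sized configuration
model = Wav2Vec2Model(config)  # needs torch installed, per the guard above
print(model.config.hidden_size)  # 768 for the default config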
16
1
'''simple docstring'''
from __future__ import annotations

from random import random


class Node:
    def __init__(self, value: int | None = None) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        '''simple docstring'''
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat({f'{self.value}: {self.prior:.5}': (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        '''simple docstring'''
        value = str(self.value) + """ """
        left = str(self.left or """""")
        right = str(self.right or """""")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=""",""")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("""Unknown command""")
    return root


def main() -> None:
    '''simple docstring'''
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("""goodbye!""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
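A hedged, non-interactive sketch of the treap API above: insert allows duplicates, erase drops every node holding the value, and inorder prints keys in sorted order regardless of the random priorities.

root = None
for value in (5, 3, 8, 3):
    root = insert(root, value)  # duplicate 3s are fine
root = erase(root, 3)  # removes all nodes with value 3
inorder(root)  # prints: 5,8,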
16
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
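A hedged sketch of the pipeline exercised by the tests above, run outside the test harness (model name and top_k mirror the slow test; exact scores will differ from run to run):

from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
for pred in unmasker("The largest city in France is <mask>"):
    print(pred["token_str"], round(pred["score"], 3))  # e.g. " Paris" with the top score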
16
1
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } _UpperCamelCase = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } _UpperCamelCase = { '''ctrl''': 256, } _UpperCamelCase = { '''Pregnancy''': 16_8629, '''Christianity''': 7675, '''Explain''': 10_6423, '''Fitness''': 6_3440, '''Saving''': 6_3163, '''Ask''': 2_7171, '''Ass''': 9_5985, '''Joke''': 16_3509, '''Questions''': 4_5622, '''Thoughts''': 4_9605, '''Retail''': 5_2342, '''Feminism''': 16_4338, '''Writing''': 1_1992, '''Atheism''': 19_2263, '''Netflix''': 4_8616, '''Computing''': 3_9639, '''Opinion''': 4_3213, '''Alone''': 4_4967, '''Funny''': 5_8917, '''Gaming''': 4_0358, '''Human''': 4088, '''India''': 1331, '''Joker''': 7_7138, '''Diet''': 3_6206, '''Legal''': 1_1859, '''Norman''': 4939, '''Tip''': 7_2689, '''Weight''': 5_2343, '''Movies''': 4_6273, '''Running''': 2_3425, '''Science''': 2090, '''Horror''': 3_7793, '''Confession''': 6_0572, '''Finance''': 1_2250, '''Politics''': 1_6360, '''Scary''': 19_1985, '''Support''': 1_2654, '''Technologies''': 3_2516, '''Teenage''': 6_6160, '''Event''': 3_2769, '''Learned''': 6_7460, '''Notion''': 18_2770, '''Wikipedia''': 3_7583, '''Books''': 6665, '''Extract''': 7_6050, '''Confessions''': 10_2701, '''Conspiracy''': 7_5932, '''Links''': 6_3674, '''Narcissus''': 15_0425, '''Relationship''': 5_4766, '''Relationships''': 13_4796, '''Reviews''': 4_1671, '''News''': 4256, '''Translation''': 2_6820, '''multilingual''': 12_8406, } def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : List[str] = set() __UpperCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : List[str] = char __UpperCAmelCase : int = set(lowerCAmelCase__ ) return pairs class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Tuple = CONTROL_CODES def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<unk>" , **__UpperCAmelCase ) -> str: '''simple docstring''' super().__init__(unk_token=__UpperCAmelCase , **__UpperCAmelCase ) with open(__UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle: __UpperCAmelCase : Any = json.load(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()} with open(__UpperCAmelCase , encoding="""utf-8""" ) as merges_handle: __UpperCAmelCase : Dict = merges_handle.read().split("""\n""" )[1:-1] __UpperCAmelCase : List[Any] = [tuple(merge.split() ) for merge in merges] __UpperCAmelCase : Union[str, Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : Optional[Any] = {} @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return len(self.encoder ) def __A ( self ) -> Optional[Any]: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' if token in self.cache: return 
self.cache[token] __UpperCAmelCase : str = tuple(__UpperCAmelCase ) __UpperCAmelCase : Tuple = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __UpperCAmelCase : Dict = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: __UpperCAmelCase : Dict = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Any = bigram __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Optional[int] = 0 while i < len(__UpperCAmelCase ): try: __UpperCAmelCase : Tuple = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : str = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__UpperCAmelCase ) __UpperCAmelCase : Any = new_word if len(__UpperCAmelCase ) == 1: break else: __UpperCAmelCase : Dict = get_pairs(__UpperCAmelCase ) __UpperCAmelCase : Any = """@@ """.join(__UpperCAmelCase ) __UpperCAmelCase : Tuple = word[:-4] __UpperCAmelCase : Optional[int] = word return word def __A ( self , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = [] __UpperCAmelCase : Union[str, Any] = re.findall(r"""\S+\n?""" , __UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) ) return split_tokens def __A ( self , __UpperCAmelCase ) -> Dict: '''simple docstring''' return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' return self.decoder.get(__UpperCAmelCase , self.unk_token ) def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = """ """.join(__UpperCAmelCase ).replace("""@@ """ , """""" ).strip() return out_string def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCAmelCase : List[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : Optional[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : Tuple = 0 with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' 
""" Please check that the tokenizer is not corrupted!""" ) __UpperCAmelCase : Optional[int] = token_index writer.write(""" """.join(__UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
16
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__SCREAMING_SNAKE_CASE ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"image": Image()} ) _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"labels": ClassLabel} ) _SCREAMING_SNAKE_CASE : str = "image" _SCREAMING_SNAKE_CASE : str = "labels" def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , __UpperCAmelCase ): raise ValueError(f'Column {self.label_column} is not a ClassLabel.' ) __UpperCAmelCase : int = copy.deepcopy(self ) __UpperCAmelCase : str = self.label_schema.copy() __UpperCAmelCase : Optional[Any] = features[self.label_column] __UpperCAmelCase : Optional[int] = label_schema return task_template @property def __A ( self ) -> Dict[str, str]: '''simple docstring''' return { self.image_column: "image", self.label_column: "labels", }
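A hedged sketch of how this template is meant to be used, assuming it mirrors the ImageClassification task template from datasets (an API deprecated in recent releases) and its align_with_features hook, which copies the template and swaps in the dataset's concrete ClassLabel:

from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification  # assumption: present only in older datasets versions

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']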
16
1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=12 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0 , __UpperCAmelCase=None , ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : Any = is_training __UpperCAmelCase : Any = use_input_mask __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : Optional[int] = hidden_size __UpperCAmelCase : Union[str, Any] = projection_dim __UpperCAmelCase : List[str] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : Union[str, Any] = intermediate_size __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[int] = max_position_embeddings __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : int = scope __UpperCAmelCase : Optional[Any] = bos_token_id def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: __UpperCAmelCase : int = input_mask.numpy() __UpperCAmelCase , __UpperCAmelCase : int = input_mask.shape __UpperCAmelCase : Any = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = 1 __UpperCAmelCase : str = 0 __UpperCAmelCase : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(__UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = TFBlipTextModel(config=__UpperCAmelCase ) __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , training=__UpperCAmelCase ) __UpperCAmelCase : int = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () _SCREAMING_SNAKE_CASE : List[Any] = False _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[Any] = BlipTextModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def __A ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def __A ( self ) -> Optional[int]: '''simple docstring''' pass @slow def __A ( self ) -> int: '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = TFBlipTextModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase=True ) -> List[Any]: '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCAmelCase )
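A hedged sketch of the tiny-model instantiation the tester above performs (config values mirror BlipTextModelTester's defaults; this builds randomly initialized weights, not a pretrained checkpoint):

import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, projection_dim=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
)
model = TFBlipTextModel(config)
outputs = model(tf.ones((1, 7), dtype=tf.int32), training=False)
print(outputs.last_hidden_state.shape)  # (1, 7, 32)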
16
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000,
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is currently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
16
1
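# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset entry above): the parameterized
# test in the preceding file drives `rope_scaling={"type": scaling_type,
# "factor": 10.0}`. The plain-PyTorch helper below is a minimal sketch of the
# "linear" variant only -- it is not the transformers implementation, and
# `rotary_angles` is a made-up name for illustration. Positions are divided by
# the factor before the sin/cos angles are computed, which is also why the
# test expects short-input outputs to *differ* from the unscaled model
# (`assertFalse(torch.allclose(...))`), while "dynamic" scaling leaves short
# inputs untouched (`assertTrue(...)`).
import torch

def rotary_angles(seq_len, dim, base=10000.0, factor=1.0):
    # Standard RoPE inverse frequencies for an even head dimension `dim`.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    # Linear scaling: shrink positions by `factor` (factor=1.0 is vanilla RoPE).
    positions = torch.arange(seq_len, dtype=torch.float32) / factor
    return torch.outer(positions, inv_freq)  # shape: (seq_len, dim // 2)

plain = rotary_angles(seq_len=128, dim=64)
scaled = rotary_angles(seq_len=128, dim=64, factor=10.0)
# Position 100 under factor=10 reuses the angles of vanilla position 10, so a
# model trained to max_position_embeddings P can address roughly P * factor
# tokens without leaving the angle range it saw during training.
assert torch.allclose(scaled[100], plain[10])
assert not torch.allclose(scaled[10], plain[10])
# ---------------------------------------------------------------------------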
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
16
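# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the dataset entry above) of the
# fill-mask behaviors the preceding test suite locks down, shown against the
# same tiny checkpoint the tests use. It assumes network access (or a local
# Hugging Face cache) for "sshleifer/tiny-distilroberta-base"; scores and
# tokens come from a tiny randomly initialized model, not a real language model.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")

# Plain top-k decoding: one dict per candidate with sequence/score/token/token_str.
print(unmasker("My name is <mask>", top_k=2))

# `targets` restricts scoring to a whitelist of vocabulary strings; the leading
# space matters for BPE vocabularies such as RoBERTa's.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))

# Two masks in one input return one list of candidates per mask position.
print(unmasker("My name is <mask> <mask>", top_k=1))
# ---------------------------------------------------------------------------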
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": __UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , sep="""\t""" , header=lowerCAmelCase__ ) for answer_list in data[1]: __UpperCAmelCase : Optional[int] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : str = [[reference] for reference in references] __UpperCAmelCase : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase : int = 100.0 * em / total __UpperCAmelCase : Dict = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = args.k __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : List[str] = set(hypo.split("""\t""" )[:k] ) __UpperCAmelCase : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __UpperCAmelCase : List[str] = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): """simple docstring""" def strip_title(lowerCAmelCase__ : Optional[int] ): if title.startswith("""\"""" ): __UpperCAmelCase : List[Any] = title[1:] if title.endswith("""\"""" ): __UpperCAmelCase : int = title[:-1] return title __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )["""input_ids"""].to(args.device ) __UpperCAmelCase : str = 
rag_model.rag.question_encoder(lowerCAmelCase__ ) __UpperCAmelCase : int = question_enc_outputs[0] __UpperCAmelCase : Dict = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __UpperCAmelCase : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __UpperCAmelCase : Union[str, Any] = [] for docs in all_docs: __UpperCAmelCase : int = [strip_title(lowerCAmelCase__ ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(lowerCAmelCase__ ) ) return provenance_strings def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ): """simple docstring""" with torch.no_grad(): __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = inputs_dict.input_ids.to(args.device ) __UpperCAmelCase : List[Any] = inputs_dict.attention_mask.to(args.device ) __UpperCAmelCase : List[str] = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __UpperCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info("""Q: {} - A: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold 
samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=( """Format of the gold data file. """ """qa - a single line in the following format: question [tab] answer_list. """ """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retrieved while generating.""" , ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if args.model_type is None: __UpperCAmelCase : str = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __UpperCAmelCase : Dict = args.n_docs if args.index_name is not None: __UpperCAmelCase : Union[str, Any] = args.index_name if args.index_path is not None: __UpperCAmelCase : Dict = args.index_path else: __UpperCAmelCase : str = BartForConditionalGeneration __UpperCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Optional[int] =
RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __UpperCAmelCase : Union[str, Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" ) preds_file.flush() __UpperCAmelCase : List[str] = [] if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _UpperCamelCase = get_args() main(args)
16
1
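# ---------------------------------------------------------------------------
# Self-contained sketch (not part of the dataset entry above) of the retrieval
# metric the preceding script computes under `--eval_mode retrieval`: each line
# of the hypothesis and gold files holds tab-separated page titles, and
# precision@k is the fraction of the top-k retrieved titles found in the gold
# provenance, averaged over lines. The toy titles below are made up for
# illustration.
def precision_at_k(hypos, references, k):
    total, em = 0, 0.0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])  # top-k retrieved titles
        ref_provenance = set(reference.split("\t"))  # gold provenance titles
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    return 100.0 * em / total

hypos = ["Paris\tLyon\tNice", "Einstein\tNewton"]
refs = ["Paris\tMarseille", "Curie\tNewton"]
print(precision_at_k(hypos, refs, k=2))  # (1/2 + 1/2) / 2 * 100 = 50.0
# ---------------------------------------------------------------------------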
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCamelCase = 16 _UpperCamelCase = 32 def lowercase_ ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 ): """simple docstring""" __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCAmelCase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCAmelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCAmelCase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCAmelCase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCAmelCase : Optional[int] = 16 elif accelerator.mixed_precision != "no": __UpperCAmelCase : Optional[int] = 8 else: __UpperCAmelCase : List[Any] = None return tokenizer.pad( lowerCAmelCase__ , padding="""longest""" , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__UpperCAmelCase : Dict = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) __UpperCAmelCase : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCamelCase = mocked_dataloaders # noqa: F811 def lowercase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCAmelCase__ ) == "1": __UpperCAmelCase : Optional[int] = 2 # Initialize accelerator __UpperCAmelCase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCAmelCase : Tuple = config["""lr"""] __UpperCAmelCase : Optional[Any] = int(config["""num_epochs"""] ) __UpperCAmelCase : Optional[Any] = int(config["""seed"""] ) __UpperCAmelCase : List[Any] = int(config["""batch_size"""] ) __UpperCAmelCase : List[Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase__ ) def inner_training_loop(lowerCAmelCase__ : Dict ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCAmelCase : List[str] = model.to(accelerator.device ) # Instantiate optimizer __UpperCAmelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) __UpperCAmelCase , __UpperCAmelCase : List[str] = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ ) # Instantiate scheduler __UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCAmelCase : List[str] = model(**lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = outputs.loss accelerator.backward(lowerCAmelCase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : Dict = model(**lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) __UpperCAmelCase : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCAmelCase__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __UpperCAmelCase : Tuple = parser.parse_args() __UpperCAmelCase : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
16
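# ---------------------------------------------------------------------------
# Minimal sketch (not part of the dataset entry above) of the
# `find_executable_batch_size` pattern the preceding script builds on: the
# decorated function must take `batch_size` as its first argument; when its
# body raises a CUDA out-of-memory error, the decorator halves the batch size
# and calls it again. The simulated failure below leans on an assumption about
# accelerate's internals -- that a RuntimeError whose message contains
# "CUDA out of memory." is treated as an OOM -- so treat it as illustrative,
# not contractual.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # Pretend anything above 16 exhausts GPU memory (simulated OOM).
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory. (simulated)")
    print(f"training succeeded with batch_size={batch_size}")

train()  # retries 64 -> 32 -> 16, then prints the success message
# ---------------------------------------------------------------------------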
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Tuple: '''simple docstring''' __UpperCAmelCase : str = parent __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : List[Any] = seq_length __UpperCAmelCase : List[str] = is_training __UpperCAmelCase : List[Any] = use_input_mask __UpperCAmelCase : Any = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : int = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Optional[Any] = type_sequence_label_size __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : Optional[int] = num_choices __UpperCAmelCase : str = scope def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Tuple = None if self.use_input_mask: __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> int: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : str = FalconModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: '''simple docstring''' __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = FalconModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = FalconForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Optional[Any] = FalconForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , 
attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : int = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : int = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[Any] = (FalconForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[Any] = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Optional[int] = False def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Tuple = FalconModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase , *__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __UpperCAmelCase : int = alibi self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Optional[int] = input_dict["""input_ids"""] __UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : str = FalconForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> 
str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = 3 __UpperCAmelCase : List[Any] = """single_label_classification""" __UpperCAmelCase : List[Any] = input_dict["""input_ids"""] __UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : List[str] = FalconForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Dict = input_dict["""input_ids"""] __UpperCAmelCase : Optional[Any] = FalconForCausalLM(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : str = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) __UpperCAmelCase : int = input_ids.shape[0] __UpperCAmelCase : Optional[int] = model._convert_to_rw_cache(result.past_key_values ) __UpperCAmelCase : Any = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase ) for layer in range(len(__UpperCAmelCase ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[str] = 3 __UpperCAmelCase : Tuple = """multi_label_classification""" __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : List[str] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Any = FalconForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__UpperCAmelCase , """use_cache""" ): return __UpperCAmelCase : Tuple = model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) if "use_cache" not in inputs: __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : int = model(**__UpperCAmelCase ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __UpperCAmelCase : List[Any] = ( getattr(__UpperCAmelCase , """decoder_layers""" , __UpperCAmelCase ) or getattr(__UpperCAmelCase , """num_decoder_layers""" , __UpperCAmelCase ) or config.num_hidden_layers ) __UpperCAmelCase : str = getattr(__UpperCAmelCase , """num_kv_heads""" , config.num_attention_heads ) __UpperCAmelCase : Optional[Any] = getattr(__UpperCAmelCase , """d_model""" , config.hidden_size ) __UpperCAmelCase : List[Any] = embed_dim // num_attention_heads __UpperCAmelCase : List[str] = outputs["""past_key_values"""] self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = inputs["""input_ids"""].shape for i in range(__UpperCAmelCase ): if config.new_decoder_architecture: __UpperCAmelCase : str = config.num_attention_heads elif config.multi_query: __UpperCAmelCase : Dict = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) __UpperCAmelCase : Optional[Any] = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) model.eval() model.to(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase ) __UpperCAmelCase : List[str] = ( """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.""" ) __UpperCAmelCase : List[Any] = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=19 ) __UpperCAmelCase : Tuple = tokenizer.batch_decode(__UpperCAmelCase )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> List[str]: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __UpperCAmelCase : Any = AutoTokenizer.from_pretrained(__UpperCAmelCase ) __UpperCAmelCase : Tuple = FalconForCausalLM.from_pretrained(__UpperCAmelCase ) model.eval() model.to(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 ) model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 ) model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 ) @slow def __A ( self ) -> int: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) __UpperCAmelCase : int = 
FalconForCausalLM.from_pretrained(__UpperCAmelCase ) model.eval() model.to(device=__UpperCAmelCase ) __UpperCAmelCase : int = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase ) # Test results are the same with and without cache __UpperCAmelCase : List[str] = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase ) __UpperCAmelCase : int = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
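The KV-cache checks in the test class above come down to shape arithmetic: each layer stores key/value tensors of shape (batch, kv_heads, seq_len, head_dim), with kv_heads depending on the attention variant. A plain-arithmetic sketch (not the model code):

# Per-layer key/value shapes for the three attention variants checked above.
def expected_kv_shape(batch_size, seq_length, hidden_size, num_heads,
                      new_decoder_architecture=False, multi_query=False):
    head_dim = hidden_size // num_heads
    if new_decoder_architecture:
        kv_heads = num_heads  # the test compares against the full head count here
    elif multi_query:
        kv_heads = 1          # one key/value head shared by all query heads
    else:
        kv_heads = num_heads
    return (batch_size, kv_heads, seq_length, head_dim)

assert expected_kv_shape(2, 7, 32, 4, multi_query=True) == (2, 1, 7, 8)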
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _UpperCamelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _UpperCamelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = do_lower_case __UpperCAmelCase : str = strip_accents __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase ) __UpperCAmelCase : List[Any] = do_lower_case def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase : 
Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
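The last two tokenizer methods above build special-token inputs and their segment ids. A toy sketch of the arithmetic with hypothetical token ids:

# Hypothetical special-token ids: cls=101, sep=102.
cls, sep = [101], [102]
ids_a, ids_b = [7, 8, 9], [4, 5]

pair = cls + ids_a + sep + ids_b + sep
type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]

# Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
assert type_ids == [0, 0, 0, 0, 0, 1, 1, 1]
assert len(pair) == len(type_ids)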
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
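Both __init__ modules above route attribute access through _LazyModule so heavy backends load only on first use. A stripped-down sketch of the underlying PEP 562 mechanism (module-level __getattr__; this is an illustration, not the transformers implementation):

# minimal_lazy.py -- resolve attributes to submodule imports on first access.
import importlib

_import_structure = {"processing": ["Processor"], "modeling": ["Model"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import the submodule lazily and pull the requested attribute from it.
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)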
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class _A(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
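For context, a small hedged sketch with the public datasets API (label names invented) of what the aligned label schema carries after alignment swaps the bare ClassLabel placeholder for the dataset's concrete feature:

from datasets import ClassLabel, Features, Image

# A dataset's features with a concrete label set (names are illustrative).
features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})

# Consumers of the aligned template can then read the concrete label mapping:
assert features["labels"].names == ["cat", "dog"]
assert features["labels"].str2int("dog") == 1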
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


_UpperCamelCase = logging.get_logger(__name__)


class _A(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
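This file is the standard deprecation shim: subclass the replacement, warn once on construction, delegate everything else. The generic pattern, sketched with invented names:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)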
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(lowerCAmelCase__ , """_dynamo""" ): return False return isinstance(lowerCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule ) def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : bool = True ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __UpperCAmelCase : Any = is_compiled_module(lowerCAmelCase__ ) if is_compiled: __UpperCAmelCase : Optional[Any] = model __UpperCAmelCase : List[Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : str = model.module if not keep_fpaa_wrapper: __UpperCAmelCase : Union[str, Any] = getattr(lowerCAmelCase__ , """forward""" ) __UpperCAmelCase : Optional[Any] = model.__dict__.pop("""_original_forward""" , lowerCAmelCase__ ) if original_forward is not None: while hasattr(lowerCAmelCase__ , """__wrapped__""" ): __UpperCAmelCase : Dict = forward.__wrapped__ if forward == original_forward: break __UpperCAmelCase : str = forward if getattr(lowerCAmelCase__ , """_converted_to_transformer_engine""" , lowerCAmelCase__ ): convert_model(lowerCAmelCase__ , to_transformer_engine=lowerCAmelCase__ ) if is_compiled: __UpperCAmelCase : Optional[Any] = model __UpperCAmelCase : Tuple = compiled_model return model def lowercase_ ( ): """simple docstring""" PartialState().wait_for_everyone() def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(lowerCAmelCase__ , lowerCAmelCase__ ) elif PartialState().local_process_index == 0: torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) @contextmanager def lowercase_ ( **lowerCAmelCase__ : int ): """simple docstring""" for key, value in kwargs.items(): __UpperCAmelCase : Any = str(lowerCAmelCase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" if not hasattr(lowerCAmelCase__ , """__qualname__""" ) and not hasattr(lowerCAmelCase__ , """__name__""" ): __UpperCAmelCase : Union[str, Any] = getattr(lowerCAmelCase__ , """__class__""" , lowerCAmelCase__ ) if hasattr(lowerCAmelCase__ , """__qualname__""" ): return obj.__qualname__ if hasattr(lowerCAmelCase__ , """__name__""" ): return obj.__name__ return str(lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ): """simple docstring""" for key, value in source.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : Any = destination.setdefault(lowerCAmelCase__ , {} ) merge_dicts(lowerCAmelCase__ , lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[Any] = value return destination def lowercase_ ( lowerCAmelCase__ : int = None ): """simple docstring""" if port is None: 
__UpperCAmelCase : List[Any] = 29500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("""localhost""", port) ) == 0
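The final helper reports whether something is already listening on a local TCP port (connect_ex returns 0 when the connection is accepted). A short usage sketch of the same idea:

import socket

def port_in_use(port=29500):
    # connect_ex returns 0 when a listener accepted the connection.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

if port_in_use():
    print("Port 29500 is taken; choose another rendezvous port.")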
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _SCREAMING_SNAKE_CASE : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) __UpperCAmelCase : List[Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : Dict = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) __UpperCAmelCase : str = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' import torch __UpperCAmelCase : Any = pipeline( task="""text-classification""" , 
model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pipeline("""text-classification""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : Any = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : int = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase : Union[str, Any] = """HuggingFace is in""" __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) __UpperCAmelCase : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""] __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase ) 
__UpperCAmelCase : Any = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] , ) __UpperCAmelCase : str = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} __UpperCAmelCase : Optional[int] = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __UpperCAmelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__UpperCAmelCase ): text_classifier(__UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
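nested_simplify(..., decimals=4) keeps the float comparisons in these tests stable across hardware. A rough reimplementation of just the rounding part (a sketch, not the helper from transformers.testing_utils):

def rounded(obj, decimals=4):
    # Recursively round floats inside lists and dicts; pass other leaves through.
    if isinstance(obj, float):
        return round(obj, decimals)
    if isinstance(obj, list):
        return [rounded(x, decimals) for x in obj]
    if isinstance(obj, dict):
        return {k: rounded(v, decimals) for k, v in obj.items()}
    return obj

assert rounded([{"label": "LABEL_0", "score": 0.50421}]) == [{"label": "LABEL_0", "score": 0.5042}]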
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) _UpperCamelCase = '''\ Text data. Second line of data.''' _UpperCamelCase = '''file''' @pytest.fixture(scope="""session""" ) def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : str = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") __UpperCAmelCase : Optional[int] = bytes(lowerCAmelCase__ , """utf-8""" ) with zstd.open(lowerCAmelCase__ , """wb""" ) as f: f.write(lowerCAmelCase__ ) return path @pytest.fixture def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase__ ) , """w""" ) as f: f.write(lowerCAmelCase__ ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : str = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} __UpperCAmelCase : Any = input_paths[compression_format] __UpperCAmelCase : int = tmp_path / """cache""" __UpperCAmelCase : int = DownloadConfig(cache_dir=lowerCAmelCase__ , extract_compressed_file=lowerCAmelCase__ ) __UpperCAmelCase : str = cached_path(lowerCAmelCase__ , download_config=lowerCAmelCase__ ) with open(lowerCAmelCase__ ) as f: __UpperCAmelCase : Optional[int] = f.read() with open(lowerCAmelCase__ ) as f: __UpperCAmelCase : str = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ): """simple docstring""" __UpperCAmelCase : Optional[int] = """custom_cache""" __UpperCAmelCase : List[str] = """custom_extracted_dir""" __UpperCAmelCase : int = tmp_path / """custom_extracted_path""" if default_extracted: __UpperCAmelCase : str = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowerCAmelCase__ ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowerCAmelCase__ ) ) __UpperCAmelCase : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __UpperCAmelCase : Any = xz_file __UpperCAmelCase : Optional[Any] = ( DownloadConfig(extract_compressed_file=lowerCAmelCase__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase__ ) ) __UpperCAmelCase : List[str] = cached_path(lowerCAmelCase__ , download_config=lowerCAmelCase__ ) assert Path(lowerCAmelCase__ ).parent.parts[-2:] == expected def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = str(Path(lowerCAmelCase__ ).resolve() ) assert cached_path(lowerCAmelCase__ ) == text_file # relative path __UpperCAmelCase : Tuple = str(Path(lowerCAmelCase__ 
).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCAmelCase__ ) == text_file def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : List[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(lowerCAmelCase__ ): cached_path(lowerCAmelCase__ ) # relative path __UpperCAmelCase : Union[str, Any] = """./__missing_file__.txt""" with pytest.raises(lowerCAmelCase__ ): cached_path(lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : int = get_from_cache(f'tmp://{tmpfs_file}' ) with open(lowerCAmelCase__ ) as f: __UpperCAmelCase : Dict = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ ) def lowercase_ ( ): """simple docstring""" with pytest.raises(lowerCAmelCase__ ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(lowerCAmelCase__ ): http_get("""https://huggingface.co""" , temp_file=lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(lowerCAmelCase__ ): ftp_get("""ftp://huggingface.co""" , temp_file=lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(lowerCAmelCase__ ): fsspec_get("""s3://huggingface.co""" , temp_file=lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): fsspec_head("""s3://huggingface.co""" )
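The zstd fixture at the top of this file can be exercised in isolation; the round-trip below uses the same zstandard file-object API that the fixture relies on (the path name is invented):

import zstandard as zstd

data = b"Text data.\nSecond line of data."
with zstd.open("sample.zstd", "wb") as f:  # transparent zstd compression
    f.write(data)
with zstd.open("sample.zstd", "rb") as f:
    assert f.read() == data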
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( 
metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = 
["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    """simple docstring"""
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
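For context, the behavior these two tests pin down, as a plain usage sketch of the same FileLock/Timeout API (the lock-file name is invented):

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("resource.lock")
with lock.acquire(timeout=1):
    # Exclusive section: a competing FileLock("resource.lock").acquire(0)
    # from another process raises Timeout while we hold the lock.
    pass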
16
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _A : def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase ) __UpperCAmelCase : List[str] = length __UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Dict: '''simple docstring''' return self.length def __getitem__( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int: '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Any = True def __A ( self , __UpperCAmelCase=None ) -> str: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : Optional[int] = False return x * self.a[0] + self.b[0] class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : str = True def __A ( self , __UpperCAmelCase=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : int = False return x * self.a + self.b def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} __UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" ) __UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : List[Any] = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" ) if "label" in examples: __UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) __UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
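# Why collate_fn above branches on DistributedType.TPU: XLA compiles one graph
# per tensor shape, so "longest" (dynamic) padding would trigger a
# recompilation on almost every batch, while "max_length" keeps shapes static.
# A hedged, CPU-runnable illustration of the two padding modes (sentences and
# lengths are illustrative):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
features = [tokenizer("short"), tokenizer("a noticeably longer input sentence")]
static = tokenizer.pad(features, padding="max_length", max_length=128, return_tensors="pt")
dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")
print(static["input_ids"].shape)   # always (2, 128), independent of the batch
print(dynamic["input_ids"].shape)  # (2, length of the longest item)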
16
1
'''simple docstring''' import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration _UpperCamelCase = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : int = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase : List[Any] = list(s_dict.keys() ) for key in keys: __UpperCAmelCase : List[str] = key for k, v in WHISPER_MAPPING.items(): if k in key: __UpperCAmelCase : Any = new_key.replace(lowerCAmelCase__ , lowerCAmelCase__ ) print(f'{key} -> {new_key}' ) __UpperCAmelCase : List[str] = s_dict.pop(lowerCAmelCase__ ) return s_dict def lowercase_ ( lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : List[str] = emb.weight.shape __UpperCAmelCase : Optional[Any] = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , 
bias=lowerCAmelCase__ ) __UpperCAmelCase : int = emb.weight.data return lin_layer def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ): """simple docstring""" os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = os.path.basename(lowerCAmelCase__ ) __UpperCAmelCase : Any = url.split("""/""" )[-2] __UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) if os.path.exists(lowerCAmelCase__ ) and not os.path.isfile(lowerCAmelCase__ ): raise RuntimeError(f'{download_target} exists and is not a regular file' ) if os.path.isfile(lowerCAmelCase__ ): __UpperCAmelCase : List[Any] = open(lowerCAmelCase__ , """rb""" ).read() if hashlib.shaaaa(lowerCAmelCase__ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' ) with urllib.request.urlopen(lowerCAmelCase__ ) as source, open(lowerCAmelCase__ , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowerCAmelCase__ , unit_divisor=1024 ) as loop: while True: __UpperCAmelCase : Optional[Any] = source.read(8192 ) if not buffer: break output.write(lowerCAmelCase__ ) loop.update(len(lowerCAmelCase__ ) ) __UpperCAmelCase : Optional[Any] = open(lowerCAmelCase__ , """rb""" ).read() if hashlib.shaaaa(lowerCAmelCase__ ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" ) return model_bytes def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ): """simple docstring""" if ".pt" not in checkpoint_path: __UpperCAmelCase : Union[str, Any] = _download(_MODELS[checkpoint_path] ) else: __UpperCAmelCase : List[str] = torch.load(lowerCAmelCase__ , map_location="""cpu""" ) __UpperCAmelCase : Any = original_checkpoint["""dims"""] __UpperCAmelCase : Any = original_checkpoint["""model_state_dict"""] __UpperCAmelCase : Union[str, Any] = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(lowerCAmelCase__ ) rename_keys(lowerCAmelCase__ ) __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] __UpperCAmelCase : Tuple = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowerCAmelCase__ , decoder_ffn_dim=lowerCAmelCase__ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) __UpperCAmelCase : Optional[int] = WhisperForConditionalGeneration(lowerCAmelCase__ ) __UpperCAmelCase , __UpperCAmelCase : Tuple = model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0 and not set(lowerCAmelCase__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f' but all the following weights are missing {missing}' ) if tie_embeds: __UpperCAmelCase : Optional[Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __UpperCAmelCase : str = proj_out_weights
model.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _UpperCamelCase = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
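# make_linear_from_emb above implements standard weight tying: the output
# projection reuses the decoder token-embedding matrix, so logits are
# hidden_states @ E^T with no separate output weights. A minimal sketch of
# the same trick (the sizes are whisper-tiny-like assumptions, not read
# from a checkpoint):
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=51865, embedding_dim=384)
vocab_size, d_model = emb.weight.shape
proj = nn.Linear(d_model, vocab_size, bias=False)
proj.weight.data = emb.weight.data  # tie: both now share the same storage
assert proj.weight.data_ptr() == emb.weight.data_ptr()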
16
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = (3, 32, 128) __UpperCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : List[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = 
self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.prepare_image_inputs() __UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Dict = """test""" __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = """test""" __UpperCAmelCase : int = self.prepare_image_inputs() __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase ) __UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() 
__UpperCAmelCase : Optional[Any] = self.get_tokenizer() __UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 ) __UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 ) __UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 ) __UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
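# The tests above all revolve around one round trip: build a processor from a
# character tokenizer plus an image processor, save it, and reload it with or
# without overridden kwargs. A hedged sketch of that round trip; the checkpoint
# id and local directory are assumptions for illustration:
from transformers import MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
processor.save_pretrained("./mgp-str-local")
reloaded = MgpstrProcessor.from_pretrained("./mgp-str-local")
assert processor.char_tokenizer.get_vocab() == reloaded.char_tokenizer.get_vocab()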
16
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _UpperCamelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _UpperCamelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = do_lower_case __UpperCAmelCase : str = strip_accents __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase ) __UpperCAmelCase : List[Any] = do_lower_case def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase : 
Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
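# A quick illustration of the two helpers above (special-token insertion and
# token-type-id layout) on a sentence pair, using one of the checkpoints from
# the pretrained map; the example sentences are arbitrary:
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("first sentence", "second sentence")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])  # 0s through the first [SEP], then 1s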
16
'''simple docstring''' from collections.abc import Sequence def lowercase_ ( lowerCAmelCase__ : Sequence[int] | None = None ): """simple docstring""" if nums is None or not nums: raise ValueError("""Input sequence should not be empty""" ) __UpperCAmelCase : Any = nums[0] for i in range(1 , len(lowerCAmelCase__ ) ): __UpperCAmelCase : Union[str, Any] = nums[i] __UpperCAmelCase : List[Any] = max(ans , ans + num , num ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user _UpperCamelCase = int(input('''Enter number of elements : ''').strip()) _UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
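# Worked example of the recurrence above: ans tracks the best non-empty
# *subsequence* (not necessarily contiguous) sum so far, and each element
# either joins it, is skipped, or restarts it. For mixed signs the result is
# the sum of the positive elements.
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
ans = nums[0]
for num in nums[1:]:
    ans = max(ans, ans + num, num)
print(ans)  # 12 == 1 + 4 + 2 + 1 + 4, the positive elements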
16
1
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ): """simple docstring""" __UpperCAmelCase : List[str] = RemBertConfig.from_json_file(lowerCAmelCase__ ) print("""Building PyTorch model from configuration: {}""".format(str(lowerCAmelCase__ ) ) ) __UpperCAmelCase : Optional[int] = RemBertModel(lowerCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model print("""Save PyTorch model to {}""".format(lowerCAmelCase__ ) ) torch.save(model.state_dict() , lowerCAmelCase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _UpperCamelCase = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
16
'''simple docstring''' class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = data __UpperCAmelCase : int = previous __UpperCAmelCase : Union[str, Any] = next_node def __str__( self ) -> str: '''simple docstring''' return f'{self.data}' def __A ( self ) -> int: '''simple docstring''' return self.data def __A ( self ) -> List[str]: '''simple docstring''' return self.next def __A ( self ) -> str: '''simple docstring''' return self.previous class _A : def __init__( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = head def __iter__( self ) -> str: '''simple docstring''' return self def __A ( self ) -> str: '''simple docstring''' if not self.current: raise StopIteration else: __UpperCAmelCase : List[str] = self.current.get_data() __UpperCAmelCase : int = self.current.get_next() return value class _A : def __init__( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = None # First node in list __UpperCAmelCase : List[str] = None # Last node in list def __str__( self ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = self.head __UpperCAmelCase : Optional[int] = [] while current is not None: nodes.append(current.get_data() ) __UpperCAmelCase : Any = current.get_next() return " ".join(str(__UpperCAmelCase ) for node in nodes ) def __contains__( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.head while current: if current.get_data() == value: return True __UpperCAmelCase : Optional[Any] = current.get_next() return False def __iter__( self ) -> str: '''simple docstring''' return LinkedListIterator(self.head ) def __A ( self ) -> List[Any]: '''simple docstring''' if self.head: return self.head.get_data() return None def __A ( self ) -> Optional[Any]: '''simple docstring''' if self.tail: return self.tail.get_data() return None def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: __UpperCAmelCase : str = node __UpperCAmelCase : List[str] = node else: self.insert_before_node(self.head , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: self.set_head(__UpperCAmelCase ) else: self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = Node(__UpperCAmelCase ) if self.head is None: self.set_head(__UpperCAmelCase ) else: self.set_tail(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Tuple = node __UpperCAmelCase : List[Any] = node.previous if node.get_previous() is None: __UpperCAmelCase : str = node_to_insert else: __UpperCAmelCase : Optional[Any] = node_to_insert __UpperCAmelCase : List[Any] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : List[str] = node __UpperCAmelCase : Union[str, Any] = node.next if node.get_next() is None: __UpperCAmelCase : Dict = node_to_insert else: __UpperCAmelCase : Any = node_to_insert __UpperCAmelCase : List[str] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Optional[Any] = Node(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.head while node: if current_position == position: 
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase ) return current_position += 1 __UpperCAmelCase : int = node.next self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Node: '''simple docstring''' __UpperCAmelCase : Dict = self.head while node: if node.get_data() == item: return node __UpperCAmelCase : List[str] = node.get_next() raise Exception("""Node not found""" ) def __A ( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if (node := self.get_node(__UpperCAmelCase )) is not None: if node == self.head: __UpperCAmelCase : Optional[int] = self.head.get_next() if node == self.tail: __UpperCAmelCase : Union[str, Any] = self.tail.get_previous() self.remove_node_pointers(__UpperCAmelCase ) @staticmethod def __A ( __UpperCAmelCase ) -> None: '''simple docstring''' if node.get_next(): __UpperCAmelCase : Optional[Any] = node.previous if node.get_previous(): __UpperCAmelCase : int = node.next __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None def __A ( self ) -> List[Any]: '''simple docstring''' return self.head is None def lowercase_ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
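# A minimal sketch of the pointer surgery insert_before_node performs above,
# with descriptive names; the field names previous/next_node are assumed from
# the Node constructor, and head bookkeeping is left to the caller as in the
# original class:
def insert_before(node, new_node):
    new_node.next_node = node
    new_node.previous = node.previous
    if node.previous is not None:
        node.previous.next_node = new_node  # splice into the left neighbor
    node.previous = new_node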
16
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''MLukeTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
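# A sketch of the _LazyModule idea the import machinery above relies on:
# attribute access triggers the real submodule import, so an optional backend
# (sentencepiece here) is only required when a symbol is actually touched.
# This is a simplified stand-in, not transformers' exact implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")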
16
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _A : _SCREAMING_SNAKE_CASE : List[str] _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="Translation" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Any: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class _A : _SCREAMING_SNAKE_CASE : Optional[List] = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="TranslationVariableLanguages" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = sorted(set(self.languages ) ) if self.languages else None __UpperCAmelCase : int = len(self.languages ) if self.languages else None def __call__( self ) -> Optional[Any]: '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def __A ( self , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = set(self.languages ) if self.languages and set(__UpperCAmelCase ) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(__UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(__UpperCAmelCase )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __UpperCAmelCase : Dict = [] for lang, text in translation_dict.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = zip(*sorted(__UpperCAmelCase ) ) return {"language": languages, "translation": translations} def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
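# A hedged illustration of what TranslationVariableLanguages' encode step
# (the second __A method above) does to a raw translation dict: flatten it to
# (lang, text) tuples, sort, and return parallel lists. Pure-Python rerun of
# that logic on an invented example:
example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
pairs = []
for lang, text in example.items():
    pairs.extend((lang, t) for t in (text if isinstance(text, list) else [text]))
languages, translations = zip(*sorted(pairs))
print(languages)     # ('en', 'fr', 'fr')
print(translations)  # ('the cat', 'la chatte', 'le chat')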
16
1
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _A : def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase ) __UpperCAmelCase : List[str] = length __UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Dict: '''simple docstring''' return self.length def __getitem__( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int: '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Any = True def __A ( self , __UpperCAmelCase=None ) -> str: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : Optional[int] = False return x * self.a[0] + self.b[0] class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : str = True def __A ( self , __UpperCAmelCase=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : int = False return x * self.a + self.b def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} __UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" ) __UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : List[Any] = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" ) if "label" in examples: __UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) __UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
16
'''simple docstring''' from statistics import mean import numpy as np def lowercase_ ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Tuple = 0 # Number of processes finished __UpperCAmelCase : Optional[int] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __UpperCAmelCase : Tuple = [0] * no_of_process # List to include calculation results __UpperCAmelCase : int = [0] * no_of_process # Sort by arrival time. __UpperCAmelCase : Dict = [burst_time[i] for i in np.argsort(lowerCAmelCase__ )] __UpperCAmelCase : Union[str, Any] = [process_name[i] for i in np.argsort(lowerCAmelCase__ )] arrival_time.sort() while no_of_process > finished_process_count: __UpperCAmelCase : Dict = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __UpperCAmelCase : Any = arrival_time[i] __UpperCAmelCase : Any = 0 # Index showing the location of the process being performed __UpperCAmelCase : Any = 0 # Saves the current response ratio. __UpperCAmelCase : List[str] = 0 for i in range(0 , lowerCAmelCase__ ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __UpperCAmelCase : Dict = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __UpperCAmelCase : Tuple = temp __UpperCAmelCase : List[str] = i # Calculate the turn around time __UpperCAmelCase : Tuple = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __UpperCAmelCase : List[str] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase_ ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Optional[int] = [0] * no_of_process for i in range(0 , lowerCAmelCase__ ): __UpperCAmelCase : List[Any] = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": _UpperCamelCase = 5 _UpperCamelCase = ['''A''', '''B''', '''C''', '''D''', '''E'''] _UpperCamelCase = [1, 2, 3, 4, 5] _UpperCamelCase = [1, 2, 3, 4, 5] _UpperCamelCase = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) _UpperCamelCase = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
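# Worked example of the selection rule the scheduler above applies (Highest
# Response Ratio Next): ratio = (wait + burst) / burst, and the ready process
# with the highest ratio runs next, so long waiters eventually win and
# starvation is avoided. The numbers here are invented:
current_time = 10
for name, arrival, burst in [("A", 0, 8), ("B", 2, 4)]:
    wait = current_time - arrival
    print(name, (wait + burst) / burst)  # A -> 2.25, B -> 3.0: B is scheduled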
16
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""google/mt5-small""" ) __UpperCAmelCase : Any = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids __UpperCAmelCase : Union[str, Any] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids __UpperCAmelCase : str = model(input_ids.to(__UpperCAmelCase ) , labels=labels.to(__UpperCAmelCase ) ).loss __UpperCAmelCase : int = -(labels.shape[-1] * loss.item()) __UpperCAmelCase : Any = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
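# The EXPECTED_SCORE arithmetic above relies on an identity worth spelling
# out: model(...).loss is the *mean* cross-entropy over label tokens, so
# -(num_label_tokens * loss) recovers the summed sequence log-likelihood.
# Verification with synthetic tensors:
import torch
import torch.nn.functional as F

logits = torch.randn(1, 4, 10)
labels = torch.randint(0, 10, (1, 4))
mean_loss = F.cross_entropy(logits.view(-1, 10), labels.view(-1))
score = -(labels.shape[-1] * mean_loss.item())
per_token = F.log_softmax(logits, dim=-1).gather(-1, labels.unsqueeze(-1))
assert abs(score - per_token.sum().item()) < 1e-4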
16
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( 
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , 
hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = 
model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
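# The integration tests above follow a common pattern: run a pinned checkpoint
# on a deterministic input, then compare a small slice of the output against
# hard-coded expected values with a loose absolute tolerance, so minor kernel
# or hardware differences do not flake. Self-contained sketch of the check
# with invented values:
import torch

expected_slice = torch.tensor([[0.1, 0.2], [0.3, 0.4]])
output_slice = expected_slice + 1e-5 * torch.randn_like(expected_slice)
assert torch.allclose(output_slice, expected_slice, atol=1e-4)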
16
1
'''simple docstring''' def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : str = hex_num.strip() if not hex_num: raise ValueError("""No value was passed to the function""" ) __UpperCAmelCase : Tuple = hex_num[0] == """-""" if is_negative: __UpperCAmelCase : Union[str, Any] = hex_num[1:] try: __UpperCAmelCase : Optional[Any] = int(lowerCAmelCase__ , 16 ) except ValueError: raise ValueError("""Invalid value was passed to the function""" ) __UpperCAmelCase : Union[str, Any] = """""" while int_num > 0: __UpperCAmelCase : int = str(int_num % 2 ) + bin_str int_num >>= 1 return int(("""-""" + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
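# Worked example of the core loop above: repeatedly emit int_num % 2 and shift
# right, building the binary string least-significant bit first.
int_num, bin_str = int("AC", 16), ""  # 0xAC == 172
while int_num > 0:
    bin_str = str(int_num % 2) + bin_str
    int_num >>= 1
print(bin_str)  # 10101100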
16
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Any = image_size __UpperCAmelCase : Dict = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : List[Any] = embed_dim __UpperCAmelCase : str = depths __UpperCAmelCase : Dict = num_heads __UpperCAmelCase : str = window_size __UpperCAmelCase : int = mlp_ratio __UpperCAmelCase : Union[str, Any] = qkv_bias __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Any = patch_norm __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = scope __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : Optional[int] = type_sequence_label_size __UpperCAmelCase : int = encoder_stride def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Tuple = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __A ( self ) -> Dict: '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps ,
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) __UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : str = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = self.type_sequence_label_size __UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs __UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[str] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = SwinvaModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 ) def __A ( self ) -> Any: '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def __A ( self ) -> Dict: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(__UpperCAmelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : str = [*signature.parameters.keys()] __UpperCAmelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : str = outputs.attentions __UpperCAmelCase : Any = len(self.model_tester.depths ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : Dict = True __UpperCAmelCase : int = config.window_size**2 __UpperCAmelCase : Any = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase : Dict = len(__UpperCAmelCase ) # Check attention is always last and order is fine __UpperCAmelCase : Any = True __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase : Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase : Optional[int] = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) ) __UpperCAmelCase : Tuple = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase 
) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : List[Any] = outputs.hidden_states __UpperCAmelCase : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # Swinv2 has a different seq_length __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase : int = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = reshaped_hidden_states[0].shape __UpperCAmelCase : Any = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = 3 __UpperCAmelCase : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase : int = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase 
, __UpperCAmelCase , (padded_height, padded_width) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = SwinvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def __A ( self ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.default_image_processor __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : Any = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
16
1
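For reference, an equivalent un-obfuscated sketch of the hex-to-binary conversion in the record above (the dump names the function lowercase_; hex_to_bin here is my label). It uses bin() in place of the digit loop but keeps the same return convention of binary digits packed into a base-10 int:

def hex_to_bin(hex_num: str) -> int:
    stripped = hex_num.strip()
    if not stripped:
        raise ValueError("No value was passed to the function")
    value = int(stripped, 16)   # raises ValueError on invalid input, as above
    if value == 0:
        return 0
    bits = bin(abs(value))[2:]  # e.g. "AC" -> "10101100"
    return int(("-" + bits) if value < 0 else bits)

assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-fc") == -11111100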
'''simple docstring''' _UpperCamelCase = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
16
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase__ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : int = size if size is not None else {"""shortest_edge""": 256} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : List[str] = size __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : Any = crop_size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Dict = do_rescale __UpperCAmelCase : List[str] = rescale_factor __UpperCAmelCase : Dict = offset __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: __UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : Any = (size["""height"""], size["""width"""]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Any = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = image.astype(np.floataa ) if offset: __UpperCAmelCase : Tuple = image - (scale / 2) return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and (size is None or resample is None): raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays.
__UpperCAmelCase : Optional[Any] = to_numpy_array(__UpperCAmelCase ) if do_resize: __UpperCAmelCase : Optional[int] = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: __UpperCAmelCase : Optional[int] = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: __UpperCAmelCase : int = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , offset=__UpperCAmelCase ) if do_normalize: __UpperCAmelCase : List[str] = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : List[Any] = resample if resample is not None else self.resample __UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[Any] = offset if offset is not None else self.offset __UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : int = image_std if image_std is not None else self.image_std __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : str = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __UpperCAmelCase : int = make_batched(__UpperCAmelCase ) __UpperCAmelCase : Tuple = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , offset=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] __UpperCAmelCase : Tuple = {"""pixel_values""": videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
16
1
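The make_batched helper in the record above normalizes three accepted input shapes into a list of videos. A trimmed sketch of just that rule (the is_valid_image checks are dropped here for brevity):

import numpy as np

def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos            # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]          # a single video (list of frames)
    return [[videos]]            # a single frame

frame = np.zeros((16, 16, 3), dtype=np.uint8)
assert make_batched(frame)[0][0] is frame          # frame -> one single-frame video
assert len(make_batched([frame, frame])) == 1      # frame list -> one video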
'''simple docstring''' def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ): """simple docstring""" print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(lowerCAmelCase__ ): for j in range(lowerCAmelCase__ ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Dict = [[float("""inf""" ) for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )] for i in range(lowerCAmelCase__ ): for j in range(lowerCAmelCase__ ): __UpperCAmelCase : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(lowerCAmelCase__ ): # looping through rows of graph array for i in range(lowerCAmelCase__ ): # looping through columns of graph array for j in range(lowerCAmelCase__ ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): __UpperCAmelCase : Union[str, Any] = dist[i][k] + dist[k][j] _print_dist(lowerCAmelCase__ , lowerCAmelCase__ ) return dist, v if __name__ == "__main__": _UpperCamelCase = int(input('''Enter number of vertices: ''')) _UpperCamelCase = int(input('''Enter number of edges: ''')) _UpperCamelCase = [[float('''inf''') for i in range(v)] for j in range(v)] for i in range(v): _UpperCamelCase = 0.0 # src and dst are indices that must be within range(v) for the v x v graph # failure to follow this will result in an error for i in range(e): print('''\nEdge ''', i + 1) _UpperCamelCase = int(input('''Enter source:''')) _UpperCamelCase = int(input('''Enter destination:''')) _UpperCamelCase = float(input('''Enter weight:''')) _UpperCamelCase = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertex, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
16
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Dict = LDMTextToImagePipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } _SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } _SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __UpperCAmelCase : Any = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __UpperCAmelCase : Tuple = CLIPTextModel(__UpperCAmelCase ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __UpperCAmelCase : Dict = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any: '''simple docstring''' if str(__UpperCAmelCase ).startswith("""mps""" ): __UpperCAmelCase : int = torch.manual_seed(__UpperCAmelCase ) else: __UpperCAmelCase : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Dict = self.get_dummy_components() __UpperCAmelCase : Tuple = LDMTextToImagePipeline(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : 
Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __UpperCAmelCase : Dict = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : int = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : int = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __UpperCAmelCase : Tuple = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) __UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = pipe(**__UpperCAmelCase ).images[0] __UpperCAmelCase : Tuple = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) __UpperCAmelCase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
16
1
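A non-interactive run of the Floyd-Warshall relaxation loop above, on the 3-vertex example from the trailing comments (edge 1->2 with weight 2, edge 2->1 with weight 1). The explicit inf guards from the original are dropped here, which is safe for Python floats since inf + x never compares below a finite distance:

INF = float("inf")
graph = [[0.0, INF, INF],
         [INF, 0.0, 2.0],
         [INF, 1.0, 0.0]]
v = 3

dist = [row[:] for row in graph]
for k in range(v):
    for i in range(v):
        for j in range(v):
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]

# matches the expected output in the comments: row 1 = [INF, 0, 2], row 2 = [INF, 1, 0]
assert dist[1][2] == 2.0 and dist[2][1] == 1.0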
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
16
'''simple docstring''' from __future__ import annotations from typing import Any class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 ) -> None: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = row, column __UpperCAmelCase : Union[str, Any] = [[default_value for c in range(__UpperCAmelCase )] for r in range(__UpperCAmelCase )] def __str__( self ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier __UpperCAmelCase : Optional[Any] = 0 for row_vector in self.array: for obj in row_vector: __UpperCAmelCase : Union[str, Any] = max(__UpperCAmelCase , len(str(__UpperCAmelCase ) ) ) __UpperCAmelCase : Optional[int] = f'%{max_element_length}s' # Make string and return def single_line(__UpperCAmelCase ) -> str: nonlocal string_format_identifier __UpperCAmelCase : Any = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__UpperCAmelCase ) for row_vector in self.array ) return s def __repr__( self ) -> str: '''simple docstring''' return str(self ) def __A ( self , __UpperCAmelCase ) -> bool: '''simple docstring''' if not (isinstance(__UpperCAmelCase , (list, tuple) ) and len(__UpperCAmelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , __UpperCAmelCase ) -> Any: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = value def __add__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == another.row and self.column == another.column # Add __UpperCAmelCase : Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : Dict = -self[r, c] return result def __sub__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' return self + (-another) def __mul__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' if isinstance(__UpperCAmelCase , (int, float) ): # Scalar multiplication __UpperCAmelCase : Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] * another return result elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # Matrix multiplication assert self.column == another.row __UpperCAmelCase : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __UpperCAmelCase : List[Any] = f'Unsupported type given for another ({type(__UpperCAmelCase )})' raise TypeError(__UpperCAmelCase ) def __A ( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Dict = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[str] = self[r, c] return result def __A ( self , 
__UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == self.column == u.row == v.row # self must be square; u, v must have matching row count assert u.column == v.column == 1 # u, v should be column vectors # Calculate __UpperCAmelCase : Optional[Any] = v.transpose() __UpperCAmelCase : List[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Dict = Matrix(3 , 3 , 0 ) for i in range(3 ): __UpperCAmelCase : Tuple = 1 print(f'a^(-1) is {ainv}' ) # u, v __UpperCAmelCase : Dict = Matrix(3 , 1 , 0 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = 1, 2, -3 __UpperCAmelCase : Union[str, Any] = Matrix(3 , 1 , 0 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase__ , lowerCAmelCase__ )}' ) def lowercase_ ( ): """simple docstring""" import doctest doctest.testmod() testa()
16
1
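The Sherman-Morrison update implemented in the record above computes (A + u v^T)^-1 = A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u), with self holding A^-1. A NumPy cross-check on the same u and v as the demo (A = I, so A^-1 = I):

import numpy as np

ainv = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

factor = 1.0 + (v.T @ ainv @ u).item()   # the numerator_factor in the method
updated = ainv - (ainv @ u) @ (v.T @ ainv) / factor

expected = np.linalg.inv(np.eye(3) + u @ v.T)
assert np.allclose(updated, expected)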
'''simple docstring''' import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use PoolFormerImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
16
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCamelCase = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
16
1
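The PoolFormerFeatureExtractor shim in this record follows a generic deprecation pattern: keep the old name importable, emit a warning, and delegate everything to the new class. A minimal generic sketch (NewImageProcessor is a placeholder name for illustration, not a transformers class):

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=256)  # emits a FutureWarning, then behaves like the new class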
'''simple docstring''' def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""Input value must be an 'int' type""" ) __UpperCAmelCase : int = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
16
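The function above counts right-shifts until the value is exhausted, i.e. the 1-based index of the most significant set bit; for non-negative ints this is exactly int.bit_length(). A quick cross-check (msb_position is my name for the dump's lowercase_):

def msb_position(number: int) -> int:
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

for n in (0, 1, 2, 5, 17, 1024):
    assert msb_position(n) == n.bit_length()   # e.g. 5 -> 3 (binary 101)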
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probable targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda x : x["score"] , reverse=True )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
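# A minimal end-to-end sketch of the behaviour the tests above exercise
# (illustrative addition; the checkpoint is the same tiny model the tests use,
# and each result is a list of {sequence, score, token, token_str} dicts):
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
mask = unmasker.tokenizer.mask_token
print(unmasker(f"This is a {mask}", top_k=2))             # two candidates for the single mask
print(unmasker(f"A {mask} and another {mask}", top_k=2))  # one candidate list per mask token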
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def lowercase_(year: int, month: int, day: int) -> str:
    """simple docstring"""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is a common year if it is not divisible by 4, or if it is a
    # century year not divisible by 400:
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
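# A small self-check for the Doomsday helper above (illustrative addition;
# both dates were verified against a calendar, and the second one exercises
# the divisible-by-400 century branch of the leap-year rule):
if __name__ == "__main__":
    assert lowercase_(2020, 10, 24) == "Saturday"
    assert lowercase_(2000, 1, 1) == "Saturday"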
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class _A(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features) -> TaskTemplate:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
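# Minimal usage sketch for the task template above (illustrative addition;
# `_A` is the image-classification TaskTemplate defined in this file, and the
# label names are made up):
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = _A().align_with_features(features)
assert task.label_schema["labels"].names == ["cat", "dog"]  # placeholder ClassLabel was replaced
assert task.column_mapping == {"image": "image", "labels": "labels"}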
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
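# Illustrative sketch (not part of the original __init__): with the lazy
# structure above, importing transformers does not load any torch/TF/Flax
# wav2vec2 code; a submodule is only imported when one of its names is
# actually resolved.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config()  # first attribute access resolves through _LazyModule
print(config.model_type)   # -> "wav2vec2"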
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
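# Illustrative sketch of the RoPE-scaling configuration exercised by the
# parameterized test above (the tiny hyperparameters are made up to keep the
# randomly initialized model cheap):
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    max_position_embeddings=64,
    rope_scaling={"type": "dynamic", "factor": 10.0},  # or {"type": "linear", "factor": ...}
)
model = LlamaModel(config)  # "dynamic" scaling only alters RoPE past 64 input tokens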
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( 
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , 
hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = 
model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": __UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , sep="""\t""" , header=lowerCAmelCase__ ) for answer_list in data[1]: __UpperCAmelCase : Optional[int] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : str = [[reference] for reference in references] __UpperCAmelCase : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase : int = 100.0 * em / total __UpperCAmelCase : Dict = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = args.k __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : List[str] = set(hypo.split("""\t""" )[:k] ) __UpperCAmelCase : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __UpperCAmelCase : List[str] = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): """simple docstring""" def strip_title(lowerCAmelCase__ : Optional[int] ): if title.startswith("""\"""" ): __UpperCAmelCase : List[Any] = title[1:] if title.endswith("""\"""" ): __UpperCAmelCase : int = title[:-1] return title __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )["""input_ids"""].to(args.device ) __UpperCAmelCase : str = 
rag_model.rag.question_encoder(lowerCAmelCase__ ) __UpperCAmelCase : int = question_enc_outputs[0] __UpperCAmelCase : Dict = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __UpperCAmelCase : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __UpperCAmelCase : Union[str, Any] = [] for docs in all_docs: __UpperCAmelCase : int = [strip_title(lowerCAmelCase__ ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(lowerCAmelCase__ ) ) return provenance_strings def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ): """simple docstring""" with torch.no_grad(): __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = inputs_dict.input_ids.to(args.device ) __UpperCAmelCase : List[Any] = inputs_dict.attention_mask.to(args.device ) __UpperCAmelCase : List[str] = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __UpperCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info("""Q: {} - A: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold 
samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if args.model_type is None: __UpperCAmelCase : str = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __UpperCAmelCase : Dict = args.n_docs if args.index_name is not None: __UpperCAmelCase : Union[str, Any] = args.index_name if args.index_path is not None: __UpperCAmelCase : Dict = args.index_path else: __UpperCAmelCase : str = BartForConditionalGeneration __UpperCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Optional[int] = 
RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __UpperCAmelCase : Union[str, Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" ) preds_file.flush() __UpperCAmelCase : List[str] = [] if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _UpperCamelCase = get_args() main(args)
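# Illustrative invocation of the evaluation script above (the script name,
# checkpoint, and file paths are placeholders; all flags are defined by the
# argparse setup in this file):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path predictions.txt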
16
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _A ( unittest.TestCase ): def __A ( self ) -> str: '''simple docstring''' if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=__UpperCAmelCase , ) assert hasattr(self , """env""" ) def __A ( self , __UpperCAmelCase=1 ) -> Optional[Any]: '''simple docstring''' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-single' , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) def __A ( self ) -> Tuple: '''simple docstring''' # create estimator __UpperCAmelCase : str = self.create_estimator() # run training estimator.fit() # result dataframe __UpperCAmelCase : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __UpperCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) __UpperCAmelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __UpperCAmelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __UpperCAmelCase )
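The test above pulls per-metric series out of the TrainingJobAnalytics dataframe and checks each value against a threshold. A small sketch of that filtering step, using a toy pandas DataFrame in place of the SageMaker one; the column names mirror the test, while the threshold values here are illustrative:

import pandas as pd

df = pd.DataFrame(
    {
        "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
        "value": [0.71, 0.82, 0.68],
    }
)
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])

assert all(a >= 0.6 for a in eval_accuracy)  # accuracy floor
assert all(l <= 0.9 for l in eval_loss)      # loss ceiling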
16
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
16
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
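The module above registers its public names in `_import_structure` and defers the heavy imports until an attribute is first accessed. A minimal sketch of that deferred-import idea using module-level `__getattr__` (PEP 562), with the stdlib `json` module standing in for a real submodule; this illustrates the pattern only and is not transformers' actual `_LazyModule`:

import importlib

_import_structure = {"json": ["loads", "dumps"]}  # submodule -> exported names

def __getattr__(name):
    # Resolve the owning submodule lazily, on first attribute access by an importer.
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")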
16
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _UpperCamelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _UpperCamelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = do_lower_case __UpperCAmelCase : str = strip_accents __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase ) __UpperCAmelCase : List[Any] = do_lower_case def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase : 
Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
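The two sequence-pair helpers above produce the classic BERT-style layout: `[CLS] A [SEP]` marked with zeros and an optional `B [SEP]` marked with ones. A plain-Python sketch of the resulting token_type_ids, working from sequence lengths only (the function name is illustrative):

from typing import List, Optional

def token_type_ids(len_a: int, len_b: Optional[int] = None) -> List[int]:
    first = [0] * (len_a + 2)         # [CLS] + tokens_a + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)  # tokens_b + [SEP]

assert token_type_ids(3) == [0, 0, 0, 0, 0]
assert token_type_ids(2, 2) == [0, 0, 0, 0, 1, 1, 1]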
16
1
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase = '''▁''' _UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[Any] = BigBirdTokenizer _SCREAMING_SNAKE_CASE : Optional[Any] = BigBirdTokenizerFast _SCREAMING_SNAKE_CASE : int = True _SCREAMING_SNAKE_CASE : int = True def __A ( self ) -> int: '''simple docstring''' super().setUp() __UpperCAmelCase : List[Any] = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = """<s>""" __UpperCAmelCase : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """[MASK]""" ) self.assertEqual(len(__UpperCAmelCase ) , 1_004 ) def __A ( self ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def __A ( self ) -> Optional[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : Dict = self.get_rust_tokenizer() __UpperCAmelCase : int = """I was born in 92000, and this is falsé.""" __UpperCAmelCase : Tuple = tokenizer.tokenize(__UpperCAmelCase ) __UpperCAmelCase : Dict = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : str = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) __UpperCAmelCase : List[str] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[int] = self.get_rust_tokenizer() __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__UpperCAmelCase ) __UpperCAmelCase : str = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + 
"""f""", """al""", """s""", """é""", """.""", ] , ) __UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def __A ( self ) -> Any: '''simple docstring''' return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = """Hello World!""" __UpperCAmelCase : str = [65, 18_536, 2_260, 101, 66] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off __UpperCAmelCase : List[Any] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @require_torch @slow def __A ( self ) -> Dict: '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence __UpperCAmelCase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] __UpperCAmelCase : Optional[int] = """ """.join(__UpperCAmelCase ) __UpperCAmelCase : Tuple = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="""pt""" , return_token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Dict = BigBirdConfig(attention_type="""original_full""" ) __UpperCAmelCase : Optional[int] = BigBirdModel(__UpperCAmelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCAmelCase ) model(**__UpperCAmelCase ) @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) __UpperCAmelCase : List[str] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids ) self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" ) @slow def __A ( self ) -> str: '''simple docstring''' # fmt: off __UpperCAmelCase : Optional[Any] = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 
5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
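Several of the assertions above rely on SentencePiece's convention that "▁" marks the start of a word. A tiny sketch of how that marker is turned back into spaces when detokenizing, mirroring the tokens-to-string step:

SPIECE_UNDERLINE = "▁"

tokens = ["▁I", "▁was", "▁b", "or", "n"]
text = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
assert text == "I was born"  # pieces without "▁" glue onto the previous word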
16
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''OwlViTFeatureExtractor'''] _UpperCamelCase = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
16
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: _UpperCamelCase = None _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} _UpperCamelCase = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } _UpperCamelCase = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } _UpperCamelCase = '''▁''' class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[str] = AlbertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> Tuple: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__UpperCAmelCase : Optional[Any] = ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Any = do_lower_case __UpperCAmelCase : Any = remove_space __UpperCAmelCase : Optional[int] = keep_accents __UpperCAmelCase : List[str] = vocab_file __UpperCAmelCase : Dict = False if not self.vocab_file else True def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCAmelCase : Union[str, Any] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
16
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
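This deprecation shim keeps the old class importable while steering users to the replacement. A generic sketch of the same pattern; the class names here are hypothetical, not part of transformers:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias that warns on instantiation, then delegates."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)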
16
1
'''simple docstring''' from functools import lru_cache def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Tuple = 2 __UpperCAmelCase : int = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(lowerCAmelCase__ ) if n > 1: factors.add(lowerCAmelCase__ ) return factors @lru_cache def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" return len(unique_prime_factors(lowerCAmelCase__ ) ) def lowercase_ ( lowerCAmelCase__ : list ): """simple docstring""" return len(set(lowerCAmelCase__ ) ) in (0, 1) def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = 2 while True: # Increment each value of a generated range __UpperCAmelCase : List[str] = [base + i for i in range(lowerCAmelCase__ )] # Run elements through our unique_prime_factors function # Append our target number to the end. __UpperCAmelCase : Dict = [upf_len(lowerCAmelCase__ ) for x in group] checker.append(lowerCAmelCase__ ) # If all numbers in the list are equal, return the group variable. if equality(lowerCAmelCase__ ): return group # Increment our base variable by 1 base += 1 def lowercase_ ( lowerCAmelCase__ : int = 4 ): """simple docstring""" __UpperCAmelCase : int = run(lowerCAmelCase__ ) return results[0] if len(lowerCAmelCase__ ) else None if __name__ == "__main__": print(solution())
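As a quick worked check of the search above: 644, 645 and 646 each have exactly three distinct prime factors, so the run of length 3 starts at 644. A self-contained verification (the factorizations are 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19):

def distinct_prime_factors(n: int) -> set:
    factors, i = set(), 2
    while i * i <= n:
        while n % i == 0:
            factors.add(i)
            n //= i
        i += 1
    if n > 1:
        factors.add(n)
    return factors

assert all(len(distinct_prime_factors(n)) == 3 for n in (644, 645, 646))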
16
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _SCREAMING_SNAKE_CASE : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) __UpperCAmelCase : List[Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : Dict = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) __UpperCAmelCase : str = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' import torch __UpperCAmelCase : Any = pipeline( task="""text-classification""" , 
model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pipeline("""text-classification""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : Any = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : int = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase : Union[str, Any] = """HuggingFace is in""" __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) __UpperCAmelCase : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""] __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase ) 
__UpperCAmelCase : Any = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] , ) __UpperCAmelCase : str = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} __UpperCAmelCase : Optional[int] = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used as a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead, as it was producing wrong outputs. __UpperCAmelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__UpperCAmelCase ): text_classifier(__UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working for backward compatibility __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
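The top_k behaviour these tests pin down can be seen directly from the pipeline; `top_k=None` returns scores for every label and is the non-legacy replacement for `return_all_scores=True`. A short usage sketch (downloads the tiny test model):

from transformers import pipeline

clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(clf("This is great !"))              # best label only
print(clf("This is great !", top_k=2))     # two best labels, flat list
print(clf("This is great !", top_k=None))  # all labels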
16
1
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = KandinskyVaaPriorPipeline _SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"] _SCREAMING_SNAKE_CASE : Any = ["prompt", "negative_prompt"] _SCREAMING_SNAKE_CASE : Optional[Any] = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] _SCREAMING_SNAKE_CASE : Dict = False @property def __A ( self ) -> Any: '''simple docstring''' return 32 @property def __A ( self ) -> List[str]: '''simple docstring''' return 32 @property def __A ( self ) -> List[Any]: '''simple docstring''' return self.time_input_dim @property def __A ( self ) -> Optional[int]: '''simple docstring''' return self.time_input_dim * 4 @property def __A ( self ) -> Dict: '''simple docstring''' return 100 @property def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def __A ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(__UpperCAmelCase ) @property def __A ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : int = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __UpperCAmelCase : Tuple = PriorTransformer(**__UpperCAmelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __A ( self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[str] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __UpperCAmelCase : str = CLIPVisionModelWithProjection(__UpperCAmelCase ) return model @property def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : List[str] = CLIPImageProcessor( crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , ) return image_processor def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = self.dummy_prior __UpperCAmelCase : List[str] = self.dummy_image_encoder __UpperCAmelCase : int = 
self.dummy_text_encoder __UpperCAmelCase : Dict = self.dummy_tokenizer __UpperCAmelCase : Any = self.dummy_image_processor __UpperCAmelCase : Optional[Any] = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , ) __UpperCAmelCase : Tuple = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> int: '''simple docstring''' if str(__UpperCAmelCase ).startswith("""mps""" ): __UpperCAmelCase : Union[str, Any] = torch.manual_seed(__UpperCAmelCase ) else: __UpperCAmelCase : str = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Any = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[Any] = """cpu""" __UpperCAmelCase : str = self.get_dummy_components() __UpperCAmelCase : Any = self.pipeline_class(**__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) __UpperCAmelCase : Tuple = output.image_embeds __UpperCAmelCase : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0] __UpperCAmelCase : str = image[0, -10:] __UpperCAmelCase : Dict = image_from_tuple[0, -10:] assert image.shape == (1, 32) __UpperCAmelCase : Union[str, Any] = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Dict = torch_device == """cpu""" __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : str = False self._test_inference_batch_single_identical( test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , ) @skip_mps def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch_device == """cpu""" __UpperCAmelCase : int = False self._test_attention_slicing_forward_pass( test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
16
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( 
metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = 
["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
16
1
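The record above stamps out the same placeholder dozens of times: a class whose metaclass makes construction fail with an actionable message when the sentencepiece backend is missing. Below is a self-contained sketch of that guard pattern; the class and helper names are hypothetical, not the transformers API.

# Hypothetical sketch of the dummy-object pattern repeated above.
class RequiresBackendMeta(type):
    """Metaclass that blocks instantiation while the backend is missing."""

    def __call__(cls, *args, **kwargs):
        if not cls.backend_available():
            raise ImportError(
                f"{cls.__name__} requires the sentencepiece library; "
                "install it with `pip install sentencepiece`."
            )
        return super().__call__(*args, **kwargs)


class DummySentencePieceTokenizer(metaclass=RequiresBackendMeta):
    @staticmethod
    def backend_available() -> bool:
        try:
            import sentencepiece  # noqa: F401

            return True
        except ImportError:
            return False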
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = '''▁''' _UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _UpperCamelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } _UpperCamelCase = { '''facebook/xglm-564M''': 2048, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Optional[int] = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' __UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __UpperCAmelCase : str = 7 __UpperCAmelCase : List[Any] = [f'<madeupword{i}>' for i in range(self.num_madeup_words )] __UpperCAmelCase : Any = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) __UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) __UpperCAmelCase : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : List[str] = 1 # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} __UpperCAmelCase : List[Any] = len(self.sp_model ) __UpperCAmelCase : Optional[Any] = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCAmelCase ) __UpperCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.__dict__.copy() __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase : int = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a __UpperCAmelCase : Optional[int] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Tuple = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __A ( self ) -> Any: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : List[Any] = self.sp_model.PieceToId(__UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __A ( self , __UpperCAmelCase ) -> int: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __A ( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def __A ( self , __UpperCAmelCase , __UpperCAmelCase = 
None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCAmelCase : int = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: __UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
16
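The tokenizer above aligns the sentencepiece vocabulary with the original fairseq one by hand-mapping four special tokens and shifting every other id up by one. A standalone sketch of that translation follows; `spm_piece_to_id` stands in for `sp_model.PieceToId` and is passed in so the snippet stays self-contained.

# Standalone sketch of the fairseq/spm id alignment described above.
FAIRSEQ_TOKENS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # every plain spm id is shifted up by one


def spm_to_fairseq_id(token: str, spm_piece_to_id) -> int:
    if token in FAIRSEQ_TOKENS:
        return FAIRSEQ_TOKENS[token]
    spm_id = spm_piece_to_id(token)
    # spm returns 0 for unknown pieces; map that to the fairseq <unk> id
    # instead of shifting it onto <pad>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS["<unk>"]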
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _A : def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase ) __UpperCAmelCase : List[str] = length __UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Dict: '''simple docstring''' return self.length def __getitem__( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int: '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Any = True def __A ( self , __UpperCAmelCase=None ) -> str: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : Optional[int] = False return x * self.a[0] + self.b[0] class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : str = True def __A ( self , __UpperCAmelCase=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : int = False return x * self.a + self.b def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} __UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" ) __UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : List[Any] = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" ) if "label" in examples: __UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) __UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
16
1
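The collate function above pads to a fixed length on TPU, where static shapes avoid recompilation, and to the longest sequence in the batch elsewhere. A minimal factory capturing that decision, assuming a Hugging Face-style tokenizer that exposes a `pad` method:

# Minimal sketch of the padding decision above.
def make_collate_fn(tokenizer, on_tpu: bool, max_length: int = 128):
    def collate_fn(examples):
        if on_tpu:
            # static shapes: pad every batch to the same fixed length
            return tokenizer.pad(
                examples, padding="max_length", max_length=max_length, return_tensors="pt"
            )
        # dynamic padding is cheaper on GPU/CPU
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    return collate_fn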
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
16
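The parameterized scaling test above passes `{"type": scaling_type, "factor": 10.0}` as the `rope_scaling` config and expects "dynamic" to leave short inputs unchanged while "linear" changes them. A small sketch of building such paired configs; treat the exact keys as read off the test, not as a spec.

# Sketch mirroring the test above: two configs differing only in RoPE scaling.
def with_rope_scaling(config_kwargs: dict, scaling_type: str, factor: float = 10.0) -> dict:
    scaled = dict(config_kwargs)
    scaled["rope_scaling"] = {"type": scaling_type, "factor": factor}
    return scaled


base = {"vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 5}
linear_cfg = with_rope_scaling(base, "linear")    # rescales all positions immediately
dynamic_cfg = with_rope_scaling(base, "dynamic")  # kicks in only past the trained length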
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = (3, 32, 128) __UpperCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : List[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = 
self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.prepare_image_inputs() __UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Dict = """test""" __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = """test""" __UpperCAmelCase : int = self.prepare_image_inputs() __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase ) __UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() 
__UpperCAmelCase : Optional[Any] = self.get_tokenizer() __UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 ) __UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 ) __UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 ) __UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
16
1
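Most of the assertions in the processor tests above follow one save/reload round-trip shape. A generic sketch of that pattern, with the comparison left to the caller:

# Generic save/reload round-trip check; `compare` is supplied by the caller.
import tempfile


def assert_roundtrip(processor_cls, processor, compare) -> None:
    with tempfile.TemporaryDirectory() as tmpdir:
        processor.save_pretrained(tmpdir)
        reloaded = processor_cls.from_pretrained(tmpdir)
        assert compare(processor, reloaded)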
'''simple docstring''' import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def lowercase_ ( lowerCAmelCase__ : int ): """simple docstring""" if "model" in orig_key: __UpperCAmelCase : int = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: __UpperCAmelCase : Optional[int] = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: __UpperCAmelCase : List[str] = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: __UpperCAmelCase : Any = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: __UpperCAmelCase : int = orig_key.split(""".""" )[0].split("""_""" )[-1] __UpperCAmelCase : List[str] = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' ) if "mha.attn" in orig_key: __UpperCAmelCase : List[Any] = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: __UpperCAmelCase : Tuple = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: __UpperCAmelCase : Tuple = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: __UpperCAmelCase : List[str] = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: __UpperCAmelCase : Union[str, Any] = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: __UpperCAmelCase : Any = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: __UpperCAmelCase : Optional[int] = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: __UpperCAmelCase : Optional[Any] = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: __UpperCAmelCase : int = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: __UpperCAmelCase : List[Any] = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: __UpperCAmelCase : Optional[int] = """yoso.""" + orig_key return orig_key def lowercase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple ): """simple docstring""" for key in orig_state_dict.copy().keys(): __UpperCAmelCase : str = orig_state_dict.pop(lowerCAmelCase__ ) if ("pooler" in key) or ("sen_class" in key): continue else: __UpperCAmelCase : List[Any] = val __UpperCAmelCase : Optional[Any] = orig_state_dict["""cls.predictions.decoder.bias"""] __UpperCAmelCase : str = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2 return orig_state_dict def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model_state_dict"""] __UpperCAmelCase : List[str] = YosoConfig.from_json_file(lowerCAmelCase__ ) __UpperCAmelCase : str = YosoForMaskedLM(lowerCAmelCase__ ) __UpperCAmelCase : Dict = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase__ ) print(model.load_state_dict(lowerCAmelCase__ ) ) model.eval() model.save_pretrained(lowerCAmelCase__ ) print(f'Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}' ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for YOSO model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _UpperCamelCase = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
16
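The converter above is a chain of in-place substring rewrites from the original checkpoint's key names to the HF scheme. The same idea expressed as a data-driven table (abbreviated rule set; ordering matters, e.g. "mha.attn" must be tried before the bare "mha" rule):

# Data-driven sketch of the key renaming above (abbreviated rule set).
RENAME_RULES = [
    ("norm1", "attention.output.LayerNorm"),
    ("norm2", "output.LayerNorm"),
    ("mha.attn", "attention.self"),
    ("mha", "attention"),
    ("ff1", "intermediate.dense"),
    ("ff2", "output.dense"),
]


def rename_key(key: str) -> str:
    # Apply every matching rule in order, like the if-chain above.
    for old, new in RENAME_RULES:
        if old in key:
            key = key.replace(old, new)
    return key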
'''simple docstring'''
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
16
1
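A quick worked check of max_subsequence_sum above: at each step the running best either extends the current sum or restarts at the new element.

# Worked example for max_subsequence_sum above.
values = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert max_subsequence_sum(values) == 6         # best subarray is [4, -1, 2, 1]
assert max_subsequence_sum([-3, -1, -2]) == -1  # all-negative input keeps the max element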
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
16
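The module above registers its import structure and defers the heavy imports until an attribute is first touched. Below is a toy version of that lazy-module idea built on importlib; it is an illustrative sketch, not the `_LazyModule` implementation.

import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Import a submodule only when one of its attributes is first requested."""

    def __init__(self, name: str, attr_to_submodule: dict):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr: str):
        submodule_name = self._attr_to_submodule.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


# e.g. sys.modules[__name__] = ToyLazyModule(__name__, {"FNetModel": "modeling_fnet"})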
'''simple docstring''' class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = data __UpperCAmelCase : int = previous __UpperCAmelCase : Union[str, Any] = next_node def __str__( self ) -> str: '''simple docstring''' return f'{self.data}' def __A ( self ) -> int: '''simple docstring''' return self.data def __A ( self ) -> List[str]: '''simple docstring''' return self.next def __A ( self ) -> str: '''simple docstring''' return self.previous class _A : def __init__( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = head def __iter__( self ) -> str: '''simple docstring''' return self def __A ( self ) -> str: '''simple docstring''' if not self.current: raise StopIteration else: __UpperCAmelCase : List[str] = self.current.get_data() __UpperCAmelCase : int = self.current.get_next() return value class _A : def __init__( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = None # First node in list __UpperCAmelCase : List[str] = None # Last node in list def __str__( self ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = self.head __UpperCAmelCase : Optional[int] = [] while current is not None: nodes.append(current.get_data() ) __UpperCAmelCase : Any = current.get_next() return " ".join(str(__UpperCAmelCase ) for node in nodes ) def __contains__( self , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.head while current: if current.get_data() == value: return True __UpperCAmelCase : Optional[Any] = current.get_next() return False def __iter__( self ) -> str: '''simple docstring''' return LinkedListIterator(self.head ) def __A ( self ) -> List[Any]: '''simple docstring''' if self.head: return self.head.get_data() return None def __A ( self ) -> Optional[Any]: '''simple docstring''' if self.tail: return self.tail.get_data() return None def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: __UpperCAmelCase : str = node __UpperCAmelCase : List[str] = node else: self.insert_before_node(self.head , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' if self.head is None: self.set_head(__UpperCAmelCase ) else: self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = Node(__UpperCAmelCase ) if self.head is None: self.set_head(__UpperCAmelCase ) else: self.set_tail(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Tuple = node __UpperCAmelCase : List[Any] = node.previous if node.get_previous() is None: __UpperCAmelCase : str = node_to_insert else: __UpperCAmelCase : Optional[Any] = node_to_insert __UpperCAmelCase : List[Any] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : List[str] = node __UpperCAmelCase : Union[str, Any] = node.next if node.get_next() is None: __UpperCAmelCase : Dict = node_to_insert else: __UpperCAmelCase : Any = node_to_insert __UpperCAmelCase : List[str] = node_to_insert def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Optional[Any] = Node(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.head while node: if current_position == position: 
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase ) return current_position += 1 __UpperCAmelCase : int = node.next self.insert_after_node(self.tail , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Node: '''simple docstring''' __UpperCAmelCase : Dict = self.head while node: if node.get_data() == item: return node __UpperCAmelCase : List[str] = node.get_next() raise Exception("""Node not found""" ) def __A ( self , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if (node := self.get_node(__UpperCAmelCase )) is not None: if node == self.head: __UpperCAmelCase : Optional[int] = self.head.get_next() if node == self.tail: __UpperCAmelCase : Union[str, Any] = self.tail.get_previous() self.remove_node_pointers(__UpperCAmelCase ) @staticmethod def __A ( __UpperCAmelCase ) -> None: '''simple docstring''' if node.get_next(): __UpperCAmelCase : Optional[Any] = node.previous if node.get_previous(): __UpperCAmelCase : int = node.next __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None def __A ( self ) -> List[Any]: '''simple docstring''' return self.head is None def lowercase_ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
16
1
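A usage sketch for the doubly linked list above. Note that the dump collapsed every public method name to __A, so the names below (insert, insert_at_position, delete_value) are assumptions about the original API, shown only to illustrate intent; the dunders (__contains__, __str__) are preserved in the dump and behave as written.

# Usage sketch; method names other than dunders are assumed, not verbatim.
dll = LinkedList()
for value in (1, 2, 4):
    dll.insert(value)             # appends at the tail: 1 2 4
dll.insert_at_position(3, 3)      # hypothetical: put 3 at position 3 -> 1 2 3 4
assert 2 in dll                   # __contains__ walks from the head
dll.delete_value(4)               # hypothetical removal by value
print(dll)                        # __str__ joins node data with spaces: "1 2 3"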
'''simple docstring'''
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
16
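A numeric sanity check for next_point above: the beam from (0.0, 10.1) first strikes the ellipse at (1.4, -9.6), and every reflected hit must still satisfy 4x^2 + y^2 = 100.

# Sanity check for next_point above: every reflected hit stays on the ellipse.
incoming = (10.1 - (-9.6)) / (0.0 - 1.4)     # gradient of the entry beam
x, y, _ = next_point(1.4, -9.6, incoming)
assert abs(4 * x * x + y * y - 100) < 1e-6   # 4x^2 + y^2 = 100 still holds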
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _A : _SCREAMING_SNAKE_CASE : List[str] _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="Translation" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Any: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class _A : _SCREAMING_SNAKE_CASE : Optional[List] = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[str] = None # Automatically constructed _SCREAMING_SNAKE_CASE : ClassVar[str] = "dict" _SCREAMING_SNAKE_CASE : ClassVar[Any] = None _SCREAMING_SNAKE_CASE : str = field(default="TranslationVariableLanguages" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = sorted(set(self.languages ) ) if self.languages else None __UpperCAmelCase : int = len(self.languages ) if self.languages else None def __call__( self ) -> Optional[Any]: '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def __A ( self , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = set(self.languages ) if self.languages and set(__UpperCAmelCase ) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(__UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(__UpperCAmelCase )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __UpperCAmelCase : Dict = [] for lang, text in translation_dict.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = zip(*sorted(__UpperCAmelCase ) ) return {"language": languages, "translation": translations} def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
16
1
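The two feature classes above back the translation column types in `datasets`. A typical usage sketch, assuming the public `datasets` API:

# Usage sketch for the features above.
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"en": "the cat sat", "fr": "le chat s'est assis"}]},
    features=features,
)
print(ds[0]["translation"])  # {'en': 'the cat sat', 'fr': "le chat s'est assis"}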
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """simple docstring"""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
16
'''simple docstring''' from statistics import mean import numpy as np def lowercase_ ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Tuple = 0 # Number of processes finished __UpperCAmelCase : Optional[int] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __UpperCAmelCase : Tuple = [0] * no_of_process # List to include calculation results __UpperCAmelCase : int = [0] * no_of_process # Sort by arrival time. __UpperCAmelCase : Dict = [burst_time[i] for i in np.argsort(lowerCAmelCase__ )] __UpperCAmelCase : Union[str, Any] = [process_name[i] for i in np.argsort(lowerCAmelCase__ )] arrival_time.sort() while no_of_process > finished_process_count: __UpperCAmelCase : Dict = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __UpperCAmelCase : Any = arrival_time[i] __UpperCAmelCase : Any = 0 # Index showing the location of the process being performed __UpperCAmelCase : Any = 0 # Saves the current response ratio. __UpperCAmelCase : List[str] = 0 for i in range(0 , lowerCAmelCase__ ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __UpperCAmelCase : Dict = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __UpperCAmelCase : Tuple = temp __UpperCAmelCase : List[str] = i # Calculate the turn around time __UpperCAmelCase : Tuple = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __UpperCAmelCase : List[str] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase_ ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Optional[int] = [0] * no_of_process for i in range(0 , lowerCAmelCase__ ): __UpperCAmelCase : List[Any] = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": _UpperCamelCase = 5 _UpperCamelCase = ['''A''', '''B''', '''C''', '''D''', '''E'''] _UpperCamelCase = [1, 2, 3, 4, 5] _UpperCamelCase = [1, 2, 3, 4, 5] _UpperCamelCase = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) _UpperCamelCase = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
16
1
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
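# Illustrative usage sketch: with the lazy structure above, names are resolved on
# first attribute access rather than at package import time. Assumes a
# transformers build that ships CPM-Ant (v4.28+).
from transformers import CpmAntConfig  # resolved on demand by _LazyModule

config = CpmAntConfig()
print(type(config).__name__)  # CpmAntConfig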
16
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( 
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , 
hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = 
model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
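# Minimal inference sketch mirroring the first integration test above (assumes
# network access to the uw-madison/mra-base-512-4 checkpoint):
import torch
from transformers import MraModel

mra = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)
with torch.no_grad():
    last_hidden = mra(input_ids)[0]
print(last_hidden.shape)  # torch.Size([1, 256, 768]), as asserted in the test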
16
1
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[8, 16, 32, 64] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=["stage2", "stage3", "stage4"] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=1 , ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : int = image_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : int = embeddings_size __UpperCAmelCase : List[Any] = hidden_sizes __UpperCAmelCase : Union[str, Any] = depths __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : int = scope __UpperCAmelCase : Optional[Any] = len(__UpperCAmelCase ) __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : List[str] = out_indices __UpperCAmelCase : Any = num_groups def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def __A ( self ) -> Optional[int]: '''simple docstring''' return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = BitModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.num_labels __UpperCAmelCase : Optional[Any] = BitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = BitBackbone(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Any = BitBackbone(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs __UpperCAmelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[Any] = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : str = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = BitModelTester(self ) __UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> List[str]: '''simple docstring''' return @unittest.skip(reason="""Bit does not output attentions""" ) def __A ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def __A ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def __A ( self ) -> List[Any]: '''simple docstring''' pass def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so 
arg_names order is deterministic __UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] __UpperCAmelCase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class(config=__UpperCAmelCase ) for name, module in model.named_modules(): if isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def __A ( self ) -> int: '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __UpperCAmelCase : Dict = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : List[Any] = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCAmelCase : Any = layer_type __UpperCAmelCase : Dict = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : List[str] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def __A ( self ) -> str: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = BitModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def __A ( self ) -> Optional[int]: '''simple docstring''' return ( 
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = self.default_image_processor __UpperCAmelCase : int = prepare_img() __UpperCAmelCase : int = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : Dict = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (BitBackbone,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Dict = BitConfig _SCREAMING_SNAKE_CASE : Optional[int] = False def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = BitModelTester(self )
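# Illustrative sketch of the backbone contract the tests above verify: one
# feature map per requested stage. The config is randomly initialised, so only
# shapes and counts are meaningful here.
import torch
from transformers import BitBackbone, BitConfig

cfg = BitConfig(out_features=["stage2", "stage3", "stage4"])
backbone = BitBackbone(cfg)
with torch.no_grad():
    feature_maps = backbone(torch.randn(1, 3, 64, 64)).feature_maps
print(len(feature_maps))  # 3, matching len(cfg.out_features)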
16
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Any = image_size __UpperCAmelCase : Dict = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : List[Any] = embed_dim __UpperCAmelCase : str = depths __UpperCAmelCase : Dict = num_heads __UpperCAmelCase : str = window_size __UpperCAmelCase : int = mlp_ratio __UpperCAmelCase : Union[str, Any] = qkv_bias __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Any = patch_norm __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = scope __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : Optional[int] = type_sequence_label_size __UpperCAmelCase : int = encoder_stride def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Tuple = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __A ( self ) -> Dict: '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) __UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : str = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = self.type_sequence_label_size __UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs __UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : List[str] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = SwinvaModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 ) def __A ( self ) -> Any: '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def __A ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def __A ( self ) -> Dict: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Tuple = model_class(__UpperCAmelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : str = [*signature.parameters.keys()] __UpperCAmelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : str = outputs.attentions __UpperCAmelCase : Any = len(self.model_tester.depths ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : Dict = True __UpperCAmelCase : int = config.window_size**2 __UpperCAmelCase : Any = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase : Dict = len(__UpperCAmelCase ) # Check attention is always last and order is fine __UpperCAmelCase : Any = True __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase : Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase : Optional[int] = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) ) __UpperCAmelCase : Tuple = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase 
) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __UpperCAmelCase : List[Any] = outputs.hidden_states __UpperCAmelCase : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # Swinv2 has a different seq_length __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase : int = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = reshaped_hidden_states[0].shape __UpperCAmelCase : Any = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = 3 __UpperCAmelCase : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase : int = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase 
, __UpperCAmelCase , (padded_height, padded_width) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = SwinvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Tuple = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def __A ( self ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.default_image_processor __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase : Any = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
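# Minimal inference sketch matching the integration test above. In the published
# transformers API the classes are spelled Swinv2... (the `Swinva` names are this
# sample's renaming). Assumes network access to the checkpoint.
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

ckpt = "microsoft/swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = Swinv2ForImageClassification.from_pretrained(ckpt)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(logits.shape)  # torch.Size([1, 1000])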
16
1
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging

logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
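# Illustrative usage sketch (the module above only runs inside the transformers
# package because of its relative imports, so treat this as a sketch):
config = RetriBertConfig(projection_dim=256)
print(config.model_type)      # "retribert"
print(config.projection_dim)  # 256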
16
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase__ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCAmelCase : int = size if size is not None else {"""shortest_edge""": 256} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : List[str] = size __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : Any = crop_size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Dict = do_rescale __UpperCAmelCase : List[str] = rescale_factor __UpperCAmelCase : Dict = offset __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: __UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __UpperCAmelCase : Any = (size["""height"""], size["""width"""]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Any = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = image.astype(np.floataa ) if offset: __UpperCAmelCase : Tuple = image - (scale / 2) return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
__UpperCAmelCase : Optional[Any] = to_numpy_array(__UpperCAmelCase ) if do_resize: __UpperCAmelCase : Optional[int] = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: __UpperCAmelCase : Optional[int] = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: __UpperCAmelCase : int = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , offset=__UpperCAmelCase ) if do_normalize: __UpperCAmelCase : List[str] = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : List[Any] = resample if resample is not None else self.resample __UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[Any] = offset if offset is not None else self.offset __UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : int = image_std if image_std is not None else self.image_std __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : str = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __UpperCAmelCase : int = make_batched(__UpperCAmelCase ) __UpperCAmelCase : Tuple = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , offset=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] __UpperCAmelCase : Tuple = {"""pixel_values""": videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
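# Illustrative usage sketch: this preprocessing pipeline matches the video image
# processors shipped in transformers (VivitImageProcessor also uses the `offset`
# rescale above). The shape comment follows from the defaults in this file:
# shortest edge resized to 256, then a 224x224 centre crop.
import numpy as np
from transformers import VivitImageProcessor

vp = VivitImageProcessor()
video = [np.random.randint(0, 256, (3, 256, 320), dtype=np.uint8) for _ in range(8)]
batch = vp(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)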
16
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["image_processor", "tokenizer"] _SCREAMING_SNAKE_CASE : List[str] = "FlavaImageProcessor" _SCREAMING_SNAKE_CASE : Dict = ("BertTokenizer", "BertTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) __UpperCAmelCase : List[Any] = kwargs.pop("""feature_extractor""" ) __UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Dict = self.image_processor def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: __UpperCAmelCase : Optional[Any] = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) if images is not None: __UpperCAmelCase : List[Any] = self.image_processor( __UpperCAmelCase , return_image_mask=__UpperCAmelCase , return_codebook_pixels=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) if text is not None and images is not None: encoding.update(__UpperCAmelCase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = self.tokenizer.model_input_names __UpperCAmelCase : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self ) -> List[str]: '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCAmelCase , ) return self.image_processor_class @property def __A ( self ) -> int: '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCAmelCase , ) return self.image_processor
16
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Dict = LDMTextToImagePipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } _SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } _SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __UpperCAmelCase : Any = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __UpperCAmelCase : Tuple = CLIPTextModel(__UpperCAmelCase ) __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __UpperCAmelCase : Dict = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def __A ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any: '''simple docstring''' if str(__UpperCAmelCase ).startswith("""mps""" ): __UpperCAmelCase : int = torch.manual_seed(__UpperCAmelCase ) else: __UpperCAmelCase : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Dict = self.get_dummy_components() __UpperCAmelCase : Tuple = LDMTextToImagePipeline(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : 
Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __UpperCAmelCase : Dict = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : int = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : int = pipe(**__UpperCAmelCase ).images __UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __UpperCAmelCase : Tuple = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) __UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self , __UpperCAmelCase , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) __UpperCAmelCase : int = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = self.get_inputs(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = pipe(**__UpperCAmelCase ).images[0] __UpperCAmelCase : Tuple = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) __UpperCAmelCase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
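These tests hinge on two idioms: a seeded, device-pinned `torch.Generator` (with a global-seed fallback for MPS) and a max-absolute-difference check of a 3x3x1 corner slice against a hard-coded reference. A standalone sketch of both:

import numpy as np
import torch

def seeded_generator(device, seed=0):
    # MPS does not accept a device-bound generator, hence the fallback.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

generator = seeded_generator("cpu")
image = np.random.RandomState(0).standard_normal((1, 16, 16, 3))
image_slice = image[0, -3:, -3:, -1]   # bottom-right corner of the last channel
expected_slice = image_slice.copy()    # the real tests hard-code this reference array
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-3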
16
1
'''simple docstring''' import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : str = inspect.getfile(accelerate.test_utils ) _SCREAMING_SNAKE_CASE : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) _SCREAMING_SNAKE_CASE : List[str] = ["accelerate", "launch"] _SCREAMING_SNAKE_CASE : Any = Path.home() / ".cache/huggingface/accelerate" _SCREAMING_SNAKE_CASE : List[str] = "default_config.yaml" _SCREAMING_SNAKE_CASE : int = config_folder / config_file _SCREAMING_SNAKE_CASE : Dict = config_folder / "_default_config.yaml" _SCREAMING_SNAKE_CASE : Optional[int] = Path("tests/test_configs" ) @classmethod def __A ( cls ) -> Optional[Any]: '''simple docstring''' if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def __A ( cls ) -> Optional[int]: '''simple docstring''' if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def __A ( self ) -> Optional[Any]: '''simple docstring''' for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ): with self.subTest(config_file=__UpperCAmelCase ): execute_subprocess_async( self.base_cmd + ["""--config_file""", str(__UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() ) def __A ( self ) -> List[str]: '''simple docstring''' execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() ) class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[Any] = "test-tpu" _SCREAMING_SNAKE_CASE : Union[str, Any] = "us-central1-a" _SCREAMING_SNAKE_CASE : int = "ls" _SCREAMING_SNAKE_CASE : Optional[Any] = ["accelerate", "tpu-config"] _SCREAMING_SNAKE_CASE : List[Any] = "cd /usr/share" _SCREAMING_SNAKE_CASE : Optional[Any] = "tests/test_samples/test_command_file.sh" _SCREAMING_SNAKE_CASE : int = "Running gcloud compute tpus tpu-vm ssh" def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = run_command( self.cmd + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __UpperCAmelCase , ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __UpperCAmelCase , ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=__UpperCAmelCase ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __UpperCAmelCase , ) def __A ( self ) -> List[str]: '''simple 
docstring''' __UpperCAmelCase : Union[str, Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __UpperCAmelCase , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--command""", """echo \"Hello World\"""", """--debug""", ] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , __UpperCAmelCase , ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __UpperCAmelCase , ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : str = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command_file""", self.command_file, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __UpperCAmelCase , ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , __UpperCAmelCase , ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--accelerate_version""", """12.0.0""", """--debug""", ] , return_stdout=__UpperCAmelCase , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , __UpperCAmelCase , )
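These tests build a CLI invocation, capture its stdout, and assert on the rendered gcloud command. A plain-subprocess sketch of what a helper like `run_command(..., return_stdout=True)` amounts to; the commented invocation is hypothetical and needs `accelerate` on PATH:

import subprocess

def run_command(cmd, return_stdout=False):
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    return result.stdout if return_stdout else None

# output = run_command(["accelerate", "tpu-config", "--command", "ls", "--debug"],
#                      return_stdout=True)
# assert "Running gcloud compute tpus tpu-vm ssh" in output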
16
'''simple docstring''' from __future__ import annotations from typing import Any class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 ) -> None: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = row, column __UpperCAmelCase : Union[str, Any] = [[default_value for c in range(__UpperCAmelCase )] for r in range(__UpperCAmelCase )] def __str__( self ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier __UpperCAmelCase : Optional[Any] = 0 for row_vector in self.array: for obj in row_vector: __UpperCAmelCase : Union[str, Any] = max(__UpperCAmelCase , len(str(__UpperCAmelCase ) ) ) __UpperCAmelCase : Optional[int] = f'%{max_element_length}s' # Make string and return def single_line(__UpperCAmelCase ) -> str: nonlocal string_format_identifier __UpperCAmelCase : Any = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__UpperCAmelCase ) for row_vector in self.array ) return s def __repr__( self ) -> str: '''simple docstring''' return str(self ) def __A ( self , __UpperCAmelCase ) -> bool: '''simple docstring''' if not (isinstance(__UpperCAmelCase , (list, tuple) ) and len(__UpperCAmelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , __UpperCAmelCase ) -> Any: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: '''simple docstring''' assert self.validate_indicies(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = value def __add__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == another.row and self.column == another.column # Add __UpperCAmelCase : Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : Dict = -self[r, c] return result def __sub__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' return self + (-another) def __mul__( self , __UpperCAmelCase ) -> Matrix: '''simple docstring''' if isinstance(__UpperCAmelCase , (int, float) ): # Scalar multiplication __UpperCAmelCase : Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[Any] = self[r, c] * another return result elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # Matrix multiplication assert self.column == another.row __UpperCAmelCase : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __UpperCAmelCase : List[Any] = f'Unsupported type given for another ({type(__UpperCAmelCase )})' raise TypeError(__UpperCAmelCase ) def __A ( self ) -> Matrix: '''simple docstring''' __UpperCAmelCase : Dict = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __UpperCAmelCase : List[str] = self[r, c] return result def __A ( self , 
__UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __UpperCAmelCase : Optional[Any] = v.transpose() __UpperCAmelCase : List[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Dict = Matrix(3 , 3 , 0 ) for i in range(3 ): __UpperCAmelCase : Tuple = 1 print(f'a^(-1) is {ainv}' ) # u, v __UpperCAmelCase : Dict = Matrix(3 , 1 , 0 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = 1, 2, -3 __UpperCAmelCase : Union[str, Any] = Matrix(3 , 1 , 0 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase__ , lowerCAmelCase__ )}' ) def lowercase_ ( ): """simple docstring""" import doctest doctest.testmod() testa()
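The `sherman_morrison` method above takes a matrix that already stores A^-1 and returns (A + uv^T)^-1 via the rank-one update formula, bailing out with None when the factor 1 + v^T A^-1 u vanishes. A NumPy check of that identity:

import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))  # well-conditioned test matrix
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()
assert denom != 0  # otherwise A + uv^T is singular and the update is undefined
sm = a_inv - (a_inv @ u @ v.T @ a_inv) / denom
assert np.allclose(sm, np.linalg.inv(a + u @ v.T))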
16
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _UpperCamelCase = get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=0 ): """simple docstring""" os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with FSDP.state_dict_type( lowerCAmelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __UpperCAmelCase : Any = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __UpperCAmelCase : Optional[int] = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) if accelerator.process_index == 0: logger.info(f'Saving model to {output_model_file}' ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __UpperCAmelCase : Union[str, Any] = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __UpperCAmelCase : int = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Saving model to {output_model_file}' ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase__ , f'{MODEL_NAME}_{model_index}' ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) logger.info(f'Saving model to {ckpt_dir}' ) __UpperCAmelCase : str = {"""model""": state_dict} dist_cp.save_state_dict( state_dict=lowerCAmelCase__ , storage_writer=dist_cp.FileSystemWriter(lowerCAmelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Model saved to {ckpt_dir}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCAmelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(lowerCAmelCase__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """ """initializing FSDP object""" ) return __UpperCAmelCase : Dict = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Loading model from {input_model_file}' ) 
__UpperCAmelCase : Any = torch.load(lowerCAmelCase__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __UpperCAmelCase : Optional[Any] = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __UpperCAmelCase : List[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Loading model from {input_model_file}' ) __UpperCAmelCase : Tuple = torch.load(lowerCAmelCase__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __UpperCAmelCase : str = ( os.path.join(lowerCAmelCase__ , f'{MODEL_NAME}_{model_index}' ) if f'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(f'Loading model from {ckpt_dir}' ) __UpperCAmelCase : Tuple = {"""model""": model.state_dict()} dist_cp.load_state_dict( state_dict=lowerCAmelCase__ , storage_reader=dist_cp.FileSystemReader(lowerCAmelCase__ ) , planner=DefaultLoadPlanner() , ) __UpperCAmelCase : Optional[Any] = state_dict["""model"""] logger.info(f'Model loaded from {ckpt_dir}' ) model.load_state_dict(lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple=0 ): """simple docstring""" os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with FSDP.state_dict_type( lowerCAmelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __UpperCAmelCase : List[Any] = FSDP.optim_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __UpperCAmelCase : List[str] = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __UpperCAmelCase : Dict = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Saving Optimizer state to {output_optimizer_file}' ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Optimizer state saved in {output_optimizer_file}' ) else: __UpperCAmelCase : Optional[Any] = os.path.join(lowerCAmelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) logger.info(f'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCAmelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Optimizer state saved in {ckpt_dir}' ) def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCAmelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __UpperCAmelCase : List[str] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __UpperCAmelCase : str = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __UpperCAmelCase : 
Union[str, Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) logger.info(f'Loading Optimizer state from {input_optimizer_file}' ) __UpperCAmelCase : Dict = torch.load(lowerCAmelCase__ ) logger.info(f'Optimizer state loaded from {input_optimizer_file}' ) else: __UpperCAmelCase : Tuple = ( os.path.join(lowerCAmelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) if f'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(f'Loading Optimizer from {ckpt_dir}' ) __UpperCAmelCase : int = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(lowerCAmelCase__ ) , ) __UpperCAmelCase : Tuple = optim_state["""optimizer"""] logger.info(f'Optimizer loaded from {ckpt_dir}' ) __UpperCAmelCase : str = FSDP.optim_state_dict_to_load(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) optimizer.load_state_dict(lowerCAmelCase__ )
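For the FULL_STATE_DICT branch above, the whole dance reduces to: rank 0 serializes a consolidated state dict to `{MODEL_NAME}.bin`, and every rank later reads it back. A minimal sketch without FSDP; the constant's value is an assumption, not taken from this file:

import os
import torch
from torch import nn

MODEL_NAME = "pytorch_model"  # assumed value of the constant imported above

def save_full_state_dict(model, output_dir, process_index=0):
    os.makedirs(output_dir, exist_ok=True)
    path = os.path.join(output_dir, f"{MODEL_NAME}.bin")
    if process_index == 0:  # only the main process writes the consolidated file
        torch.save(model.state_dict(), path)
    return path

model = nn.Linear(4, 2)
ckpt = save_full_state_dict(model, "/tmp/fsdp_sketch")
model.load_state_dict(torch.load(ckpt))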
16
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCamelCase = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
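The `_LazyModule` assigned at the bottom defers every submodule import until one of the exported names is first touched. A simplified sketch of that mechanism; the real class also caches resolved modules and supports direct submodule access:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [names]} into {name: submodule} for O(1) lookup.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
        self.__all__ = list(self._class_to_module)

    def __getattr__(self, name):
        # The actual import happens here, on first attribute access.
        module = importlib.import_module(f".{self._class_to_module[name]}", self.__name__)
        return getattr(module, name)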
16
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
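Both init files repeat the same guard: probe for an optional backend and only extend the export list when it is present. A standalone sketch of that probe using `importlib.util.find_spec` (the structure names are illustrative):

import importlib.util

def is_available(package):
    return importlib.util.find_spec(package) is not None

_import_structure = {"configuration": ["Config"]}
if is_available("torch"):
    _import_structure["modeling"] = ["Model", "PreTrainedModel"]
print(_import_structure)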
16
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( 
nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' 
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": 
ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : 
List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
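Outside the test harness, the API these cases exercise is just the `fill-mask` pipeline. A usage sketch with the same tiny checkpoint the tests use (downloaded on first run; scores and tokens are model-dependent):

from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("Paris is the <mask> of France."):
    # Each prediction carries: sequence, score, token (the id), and token_str.
    print(round(prediction["score"], 4), prediction["token_str"], prediction["sequence"])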
16
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
16
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__SCREAMING_SNAKE_CASE ) class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"image": Image()} ) _SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"labels": ClassLabel} ) _SCREAMING_SNAKE_CASE : str = "image" _SCREAMING_SNAKE_CASE : str = "labels" def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , __UpperCAmelCase ): raise ValueError(f'Column {self.label_column} is not a ClassLabel.' ) __UpperCAmelCase : int = copy.deepcopy(self ) __UpperCAmelCase : str = self.label_schema.copy() __UpperCAmelCase : Optional[Any] = features[self.label_column] __UpperCAmelCase : Optional[int] = label_schema return task_template @property def __A ( self ) -> Dict[str, str]: '''simple docstring''' return { self.image_column: "image", self.label_column: "labels", }
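The alignment method above clones the template and swaps the bare `ClassLabel` placeholder for the dataset's concrete label feature. A small sketch of that resolution step with the `datasets` primitives:

from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
label_feature = features["labels"]
assert isinstance(label_feature, ClassLabel)
label_schema = Features({"labels": label_feature})  # concrete schema replaces the placeholder
print(label_schema["labels"].names)  # ['cat', 'dog']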
16
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["image_processor", "tokenizer"] _SCREAMING_SNAKE_CASE : List[str] = "CLIPImageProcessor" _SCREAMING_SNAKE_CASE : Dict = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = kwargs.pop("""feature_extractor""" ) __UpperCAmelCase : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict: '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[Any] = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: __UpperCAmelCase : str = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: __UpperCAmelCase : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names __UpperCAmelCase : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
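The `model_input_names` property above merges the tokenizer's and image processor's name lists while preserving order and dropping duplicates; `dict.fromkeys` is the idiom doing the work. In isolation:

tokenizer_names = ["input_ids", "attention_mask"]
image_names = ["pixel_values", "attention_mask"]  # the overlap is deduplicated
merged = list(dict.fromkeys(tokenizer_names + image_names))
assert merged == ["input_ids", "attention_mask", "pixel_values"]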
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
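# The key-value-cache test in the file above boils down to this check: decoding
# with a cache must produce the same logits as recomputing the full sequence.
# A self-contained sketch with a tiny randomly initialized config (all sizes
# are arbitrary, chosen only to keep the model small):
import torch
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig(
    vocab_size=128, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64
)
model = LlamaForCausalLM(config).eval()
ids = torch.randint(0, 128, (1, 8))

with torch.no_grad():
    full = model(ids).logits[:, -1]  # recompute everything from scratch
    past = model(ids[:, :-1], use_cache=True).past_key_values
    cached = model(ids[:, -1:], past_key_values=past).logits[:, -1]  # reuse the cache
assert torch.allclose(full, cached, atol=1e-4)  # caching must not change the outputs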
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _UpperCamelCase = logging.get_logger(__name__) def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Any=None ): """simple docstring""" if "." in tensor_name: __UpperCAmelCase : Union[str, Any] = tensor_name.split(""".""" ) for split in splits[:-1]: __UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) __UpperCAmelCase : Tuple = new_module __UpperCAmelCase : List[Any] = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' ) __UpperCAmelCase : Tuple = tensor_name in module._buffers __UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' ) __UpperCAmelCase : Any = False __UpperCAmelCase : Tuple = False if is_buffer or not is_bitsandbytes_available(): __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Dict = False else: __UpperCAmelCase : Optional[Any] = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __UpperCAmelCase : Tuple = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __UpperCAmelCase : List[str] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __UpperCAmelCase : str = old_value.to(lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , torch.Tensor ): __UpperCAmelCase : Tuple = value.to("""cpu""" ) if value.dtype == torch.inta: __UpperCAmelCase : List[str] = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: __UpperCAmelCase : Optional[Any] = torch.tensor(lowerCAmelCase__ , device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , lowerCAmelCase__ ) and fpaa_statistics is None: __UpperCAmelCase : Union[str, Any] = new_value.T __UpperCAmelCase : List[str] = old_value.__dict__ if is_abit: __UpperCAmelCase : int = bnb.nn.IntaParams(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ , **lowerCAmelCase__ ).to(lowerCAmelCase__ ) elif is_abit: __UpperCAmelCase : Union[str, Any] = bnb.nn.Paramsabit(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ , **lowerCAmelCase__ ).to(lowerCAmelCase__ ) __UpperCAmelCase : List[str] = new_value if fpaa_statistics is not None: setattr(module.weight , """SCB""" , fpaa_statistics.to(lowerCAmelCase__ ) ) else: if value is None: __UpperCAmelCase : Optional[Any] = old_value.to(lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , torch.Tensor ): __UpperCAmelCase : Tuple = value.to(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = torch.tensor(lowerCAmelCase__ , device=lowerCAmelCase__ ) if is_buffer: __UpperCAmelCase : Any = new_value else: __UpperCAmelCase : Union[str, Any] = nn.Parameter(lowerCAmelCase__ , requires_grad=old_value.requires_grad ) __UpperCAmelCase : Dict = new_value def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : int=False ): """simple docstring""" for name, module in model.named_children(): if current_key_name is None: __UpperCAmelCase : Any = [] current_key_name.append(lowerCAmelCase__ ) if (isinstance(lowerCAmelCase__ , nn.Linear ) or isinstance(lowerCAmelCase__ , lowerCAmelCase__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(lowerCAmelCase__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase , __UpperCAmelCase : str = module.weight.shape else: __UpperCAmelCase : Optional[int] = module.in_features __UpperCAmelCase : Tuple = module.out_features if quantization_config.quantization_method() == "llm_int8": __UpperCAmelCase : Optional[Any] = bnb.nn.LinearabitLt( lowerCAmelCase__ , lowerCAmelCase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __UpperCAmelCase : int = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __UpperCAmelCase : Optional[Any] = bnb.nn.Linearabit( lowerCAmelCase__ , lowerCAmelCase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __UpperCAmelCase : str = True # Store the module class in case we need to transpose the weight later __UpperCAmelCase : Optional[Any] = type(lowerCAmelCase__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowerCAmelCase__ ) if len(list(module.children() ) ) > 0: __UpperCAmelCase , __UpperCAmelCase : Dict = _replace_with_bnb_linear( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_been_replaced=lowerCAmelCase__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Union[str, Any]=None ): """simple 
docstring""" __UpperCAmelCase : List[Any] = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert __UpperCAmelCase , __UpperCAmelCase : Any = _replace_with_bnb_linear( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( *lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[Any] ): """simple docstring""" warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , lowerCAmelCase__ , ) return replace_with_bnb_linear(*lowerCAmelCase__ , **lowerCAmelCase__ ) def lowercase_ ( *lowerCAmelCase__ : int , **lowerCAmelCase__ : Tuple ): """simple docstring""" warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , lowerCAmelCase__ , ) return set_module_quantized_tensor_to_device(*lowerCAmelCase__ , **lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase : str = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __UpperCAmelCase : Optional[int] = find_tied_parameters(lowerCAmelCase__ ) # For compatibility with Accelerate < 0.18 if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __UpperCAmelCase : List[str] = sum(lowerCAmelCase__ , [] ) __UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase__ ) > 0 # Check if it is a base model __UpperCAmelCase : Optional[Any] = not hasattr(lowerCAmelCase__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __UpperCAmelCase : Tuple = list(model.named_children() ) __UpperCAmelCase : Dict = [list_modules[-1][0]] # add last module together with tied weights __UpperCAmelCase : str = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ ) # remove ".weight" from the keys __UpperCAmelCase : Any = [""".weight""", """.bias"""] __UpperCAmelCase : Dict = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __UpperCAmelCase : List[str] = name.replace(lowerCAmelCase__ , """""" ) filtered_module_names.append(lowerCAmelCase__ ) return filtered_module_names
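# These helpers are normally driven through `from_pretrained` rather than
# called directly. A minimal sketch of the int8 path (requires a CUDA GPU plus
# the `bitsandbytes` and `accelerate` packages; the checkpoint name is just an
# example):
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",
    quantization_config=quant_config,
    device_map="auto",
)
# After loading, nn.Linear layers (except modules like lm_head) have been
# swapped for bnb.nn.Linear8bitLt by the replacement logic above:
print({type(m).__name__ for m in model.modules() if "8bit" in type(m).__name__.lower()})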
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = [] if args.gold_data_mode == "qa": __UpperCAmelCase : Tuple = pd.read_csv(lowerCAmelCase__ , sep="""\t""" , header=lowerCAmelCase__ ) for answer_list in data[1]: __UpperCAmelCase : Optional[int] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : str = [[reference] for reference in references] __UpperCAmelCase : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase : int = 100.0 * em / total __UpperCAmelCase : Dict = 100.0 * fa / total logger.info(f'F1: {fa:.2f}' ) logger.info(f'EM: {em:.2f}' ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = args.k __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Dict = [line.strip() for line in open(lowerCAmelCase__ , """r""" ).readlines()] __UpperCAmelCase : Union[str, Any] = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __UpperCAmelCase : List[str] = set(hypo.split("""\t""" )[:k] ) __UpperCAmelCase : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __UpperCAmelCase : List[str] = 100.0 * em / total logger.info(f'Precision@{k}: {em: .2f}' ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): """simple docstring""" def strip_title(lowerCAmelCase__ : Optional[int] ): if title.startswith("""\"""" ): __UpperCAmelCase : List[Any] = title[1:] if title.endswith("""\"""" ): __UpperCAmelCase : int = title[:-1] return title __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )["""input_ids"""].to(args.device ) __UpperCAmelCase : str = 
rag_model.rag.question_encoder(lowerCAmelCase__ ) __UpperCAmelCase : int = question_enc_outputs[0] __UpperCAmelCase : Dict = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) __UpperCAmelCase : Union[str, Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __UpperCAmelCase : Union[str, Any] = [] for docs in all_docs: __UpperCAmelCase : int = [strip_title(lowerCAmelCase__ ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(lowerCAmelCase__ ) ) return provenance_strings def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ): """simple docstring""" with torch.no_grad(): __UpperCAmelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = inputs_dict.input_ids.to(args.device ) __UpperCAmelCase : List[Any] = inputs_dict.attention_mask.to(args.device ) __UpperCAmelCase : List[str] = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __UpperCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info("""Q: {} - A: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold 
samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowercase_ ( lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if args.model_type is None: __UpperCAmelCase : str = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration __UpperCAmelCase : Dict = args.n_docs if args.index_name is not None: __UpperCAmelCase : Union[str, Any] = args.index_name if args.index_path is not None: __UpperCAmelCase : Dict = args.index_path else: __UpperCAmelCase : str = BartForConditionalGeneration __UpperCAmelCase : str = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k __UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): __UpperCAmelCase : Optional[int] = 
RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: __UpperCAmelCase : Union[str, Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" ) preds_file.flush() __UpperCAmelCase : List[str] = [] if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write("""\n""".join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _UpperCamelCase = get_args() main(args)
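# The scoring core of the evaluation script above reduces to taking the best
# metric value over all gold answers. A small self-contained sketch with a toy
# metric (the real script uses exact_match_score / f1_score from utils_rag):
def exact_match(prediction: str, ground_truth: str) -> float:
    # Toy stand-in: case- and whitespace-insensitive string equality
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


print(metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Paris, France"]))  # 1.0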
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase : Tuple = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> Tuple: '''simple docstring''' pass @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __UpperCAmelCase : str = object_detector( [ { """image""": 
"""./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __UpperCAmelCase : Any = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 
20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ) -> List[str]: '''simple docstring''' pass @require_torch @slow def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0.2 __UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : Optional[int] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" ) __UpperCAmelCase : List[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
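# Outside the test harness, the pipeline exercised above is used like this
# (a sketch; the image URL is the COCO sample the tests themselves rely on,
# and the threshold value is arbitrary):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
outputs = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for det in outputs:
    box = det["box"]
    print(f"{det['label']}: {det['score']:.2f} at ({box['xmin']}, {box['ymin']})")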
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the passed-in options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
16
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device _UpperCamelCase = False class _A ( unittest.TestCase ): pass @slow @require_torch_gpu class _A ( unittest.TestCase ): def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCAmelCase : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) __UpperCAmelCase : str = torch.manual_seed(0 ) __UpperCAmelCase : Tuple = pipe( image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images __UpperCAmelCase : Dict = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __UpperCAmelCase : str = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
16
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''OwlViTFeatureExtractor'''] _UpperCamelCase = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
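The `_LazyModule` call at the bottom of the file above defers every import until an attribute is actually touched. A minimal sketch of that pattern, using only the standard importlib machinery (the real `_LazyModule` in transformers also handles module specs, extra objects, and error reporting):

# Hypothetical stripped-down lazy module: attributes are imported on first access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per attribute
        return value

# Typical usage, as at the bottom of the file above (simplified):
# sys.modules[__name__] = LazyModule(__name__, _import_structure)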
16
1
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset dataset = pd.read_csv( '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/''' '''position_salaries.csv''' ) X = dataset.iloc[:, 1:2].values y = dataset.iloc[:, 2].values X_train , X_test , y_train , y_test = train_test_split(X, y, test_size=0.2, random_state=0) poly_reg = PolynomialFeatures(degree=4) X_poly = poly_reg.fit_transform(X) pol_reg = LinearRegression() pol_reg.fit(X_poly, y) def viz_polynomial ( ): """simple docstring""" plt.scatter(X , y , color="""red""" ) plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Polynomial Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polynomial() # Predicting a new result with Polynomial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
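Because the dataset has only a handful of position levels, the fitted curve looks jagged when plotted at the training points alone. A common refinement (a sketch, assuming numpy is available and reusing X, y, poly_reg, and pol_reg from the snippet above) evaluates the degree-4 fit on a dense grid:

import numpy as np

# Evaluate the fitted polynomial on a dense grid of position levels for a smooth curve.
X_grid = np.arange(X.min(), X.max() + 0.1, 0.1).reshape(-1, 1)
plt.scatter(X, y, color="red")
plt.plot(X_grid, pol_reg.predict(poly_reg.fit_transform(X_grid)), color="blue")
plt.title("Truth or Bluff (Polynomial Regression)")
plt.show()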
16
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
16
1
'''simple docstring''' def nand_gate ( input_a : int , input_b : int ) -> int: """simple docstring""" return int((input_a, input_b).count(0 ) != 0 ) def test_nand_gate ( ) -> None: """simple docstring""" assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
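A NAND gate is 0 only when both inputs are 1. Using the `nand_gate` function defined above, enumerating the full truth table makes this explicit:

from itertools import product

# Print the full NAND truth table by iterating over all input combinations.
for a, b in product((0, 1), repeat=2):
    print(f"NAND({a}, {b}) = {nand_gate(a, b)}")  # 1, 1, 1, 0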
16
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _SCREAMING_SNAKE_CASE : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) __UpperCAmelCase : List[Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) __UpperCAmelCase : Optional[int] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) __UpperCAmelCase : Dict = text_classifier("""This is great !""" , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) __UpperCAmelCase : str = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) __UpperCAmelCase : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' import torch __UpperCAmelCase : Any = pipeline( task="""text-classification""" , 
model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pipeline("""text-classification""" ) __UpperCAmelCase : int = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : Any = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = pipeline("""text-classification""" , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) __UpperCAmelCase : int = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) __UpperCAmelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase : Union[str, Any] = """HuggingFace is in""" __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) __UpperCAmelCase : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""] __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}, {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase : Any = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase ) 
__UpperCAmelCase : Any = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [[{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N, [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] * N] , ) __UpperCAmelCase : str = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} __UpperCAmelCase : Optional[int] = text_classifier(__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , {"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __UpperCAmelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__UpperCAmelCase ): text_classifier(__UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __UpperCAmelCase : Tuple = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{"""label""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
16
1
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _UpperCamelCase = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _UpperCamelCase = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} _UpperCamelCase = '''zero2''' _UpperCamelCase = '''zero3''' _UpperCamelCase = [ZEROa, ZEROa] def lowercase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : str = parameterized.to_safe_name("""_""".join(str(lowerCAmelCase__ ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test _UpperCamelCase = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _A ( __SCREAMING_SNAKE_CASE ): @parameterized.expand(__UpperCAmelCase , name_func=__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' self.run_and_check( stage=__UpperCAmelCase , model=__UpperCAmelCase , distributed=__UpperCAmelCase , fpaa=__UpperCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCAmelCase , name_func=__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' self.run_and_check( stage=__UpperCAmelCase , model=__UpperCAmelCase , distributed=__UpperCAmelCase , fpaa=__UpperCAmelCase , ) @parameterized.expand(__UpperCAmelCase , name_func=__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' self.run_and_check( stage=__UpperCAmelCase , model=__UpperCAmelCase , distributed=__UpperCAmelCase , fpaa=__UpperCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCAmelCase , name_func=__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' self.run_and_check( stage=__UpperCAmelCase , model=__UpperCAmelCase , distributed=__UpperCAmelCase , fpaa=__UpperCAmelCase , ) def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 10 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = True , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = models[model] __UpperCAmelCase : Dict = self.run_trainer( stage=__UpperCAmelCase , model_name=__UpperCAmelCase , eval_steps=__UpperCAmelCase , num_train_epochs=1 , distributed=__UpperCAmelCase , fpaa=__UpperCAmelCase , ) self.do_checks(__UpperCAmelCase ) return 
output_dir def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 10 , __UpperCAmelCase = 1 , __UpperCAmelCase = True , __UpperCAmelCase = True , ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir("""./xxx""" , after=__UpperCAmelCase ) __UpperCAmelCase : str = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__UpperCAmelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __UpperCAmelCase : List[Any] = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __UpperCAmelCase : Optional[Any] = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __UpperCAmelCase : List[Any] = self.get_launcher(__UpperCAmelCase ) __UpperCAmelCase : Tuple = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__UpperCAmelCase , env=self.get_env() ) return output_dir def __A ( self , __UpperCAmelCase=False ) -> Any: '''simple docstring''' # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __UpperCAmelCase : int = min(2 , get_gpu_count() ) if distributed else 1 return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
16
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( 
metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = 
["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] ) class _A ( metaclass=__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""sentencepiece"""] )
16
1
'''simple docstring''' import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _A ( unittest.TestCase ): def __A ( self ) -> Any: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) __UpperCAmelCase : str = controlnet_params __UpperCAmelCase : str = """bird""" __UpperCAmelCase : str = jax.device_count() __UpperCAmelCase : List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCAmelCase : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) __UpperCAmelCase : str = pipe.prepare_image_inputs([canny_image] * num_samples ) __UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) __UpperCAmelCase : Dict = jax.random.split(__UpperCAmelCase , jax.device_count() ) __UpperCAmelCase : Optional[int] = replicate(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = shard(__UpperCAmelCase ) __UpperCAmelCase : Dict = shard(__UpperCAmelCase ) __UpperCAmelCase : Any = pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __UpperCAmelCase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase : int = images[0, 253:256, 253:256, -1] __UpperCAmelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase : List[str] = jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) __UpperCAmelCase , __UpperCAmelCase : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) __UpperCAmelCase : Any = controlnet_params __UpperCAmelCase : int = """Chef in the kitchen""" __UpperCAmelCase : Optional[Any] = jax.device_count() __UpperCAmelCase : int = pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) __UpperCAmelCase : Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) __UpperCAmelCase : int = jax.random.PRNGKey(0 ) __UpperCAmelCase : str = jax.random.split(__UpperCAmelCase , 
jax.device_count() ) __UpperCAmelCase : Tuple = replicate(__UpperCAmelCase ) __UpperCAmelCase : int = shard(__UpperCAmelCase ) __UpperCAmelCase : int = shard(__UpperCAmelCase ) __UpperCAmelCase : Any = pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __UpperCAmelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase : Tuple = images[0, 253:256, 253:256, -1] __UpperCAmelCase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase : List[str] = jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
16
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _A : def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase ) __UpperCAmelCase : List[str] = length __UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Dict: '''simple docstring''' return self.length def __getitem__( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int: '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __UpperCAmelCase : Any = True def __A ( self , __UpperCAmelCase=None ) -> str: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : Optional[int] = False return x * self.a[0] + self.b[0] class _A ( torch.nn.Module ): def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' super().__init__() __UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() ) __UpperCAmelCase : str = True def __A ( self , __UpperCAmelCase=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __UpperCAmelCase : int = False return x * self.a + self.b def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} __UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ ) __UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" ) __UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : List[Any] = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" ) if "label" in examples: __UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase : Tuple = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) __UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
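A hedged sketch of how the toy dataset and model above would typically be wired together with accelerate. The names `RegressionDataset` and `RegressionModel` are assumptions standing in for the obfuscated classes above; the Accelerator calls themselves (`prepare`, `backward`) are the standard API:

import torch
from torch.utils.data import DataLoader
from accelerate import Accelerator

accelerator = Accelerator()
dataset = RegressionDataset(length=128, seed=42)  # assumed name for the dataset above
dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
model = RegressionModel()  # assumed name for the parameter-holding module above
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# prepare() moves everything to the right device and wraps the dataloader.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()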
16
1
'''simple docstring''' import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = PriorTransformer _SCREAMING_SNAKE_CASE : Dict = "hidden_states" @property def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 4 __UpperCAmelCase : Optional[int] = 8 __UpperCAmelCase : Tuple = 7 __UpperCAmelCase : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : Any = floats_tensor((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __A ( self , __UpperCAmelCase=0 ) -> Dict: '''simple docstring''' torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = 4 __UpperCAmelCase : List[Any] = 8 __UpperCAmelCase : Dict = 7 __UpperCAmelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return (4, 8) @property def __A ( self ) -> Tuple: '''simple docstring''' return (4, 8) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : str = { """num_attention_heads""": 2, """attention_head_dim""": 4, """num_layers""": 2, """embedding_dim""": 8, """num_embeddings""": 7, """additional_embeddings""": 4, } __UpperCAmelCase : str = self.dummy_input return init_dict, inputs_dict def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = PriorTransformer.from_pretrained( """hf-internal-testing/prior-dummy""" , output_loading_info=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Any = self.prepare_init_args_and_inputs_for_common() __UpperCAmelCase : Any = self.model_class(**__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : int = [*signature.parameters.keys()] __UpperCAmelCase : Optional[int] = ["""hidden_states""", """timestep"""] self.assertListEqual(arg_names[:2] , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" ) __UpperCAmelCase : Optional[Any] = model.to(__UpperCAmelCase ) if hasattr(__UpperCAmelCase , """set_default_attn_processor""" ): 
model.set_default_attn_processor() __UpperCAmelCase : List[str] = self.get_dummy_seed_input() with torch.no_grad(): __UpperCAmelCase : Dict = model(**__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = output[0, :5].flatten().cpu() print(__UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __UpperCAmelCase : Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] ) self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) ) @slow class _A ( unittest.TestCase ): def __A ( self , __UpperCAmelCase=1 , __UpperCAmelCase=768 , __UpperCAmelCase=77 , __UpperCAmelCase=0 ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(__UpperCAmelCase ) __UpperCAmelCase : Any = batch_size __UpperCAmelCase : str = embedding_dim __UpperCAmelCase : str = num_embeddings __UpperCAmelCase : List[str] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __A ( self ) -> List[str]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[Any] = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" ) model.to(__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.get_dummy_seed_input(seed=__UpperCAmelCase ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(**__UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] __UpperCAmelCase : str = sample[0, :8].flatten().cpu() print(__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.tensor(__UpperCAmelCase ) assert torch_all_close(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
16
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None @property def __A ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = (3, 32, 128) __UpperCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) __UpperCAmelCase : List[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = 
self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[str] = self.prepare_image_inputs() __UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" ) __UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Dict = """test""" __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = """test""" __UpperCAmelCase : int = self.prepare_image_inputs() __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : List[Any] = self.get_tokenizer() __UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase ) __UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = self.get_image_processor() 
__UpperCAmelCase : Optional[Any] = self.get_tokenizer() __UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 ) __UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 ) __UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 ) __UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
16
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''MobileViTFeatureExtractor'''] _UpperCamelCase = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
16
'''simple docstring''' from collections.abc import Sequence def max_subsequence_sum ( nums : Sequence[int] | None = None ) -> int: """simple docstring""" if nums is None or not nums: raise ValueError("""Input sequence should not be empty""" ) ans = cur = nums[0] for num in nums[1:]: cur = max(num , cur + num ) ans = max(ans , cur ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user n = int(input('''Enter number of elements : ''').strip()) array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
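The loop above is Kadane's algorithm: `cur` tracks the best sum of a subarray ending at the current element, `ans` the best sum seen anywhere so far. A quick sanity check on standard inputs:

# Classic mixed-sign case: the best contiguous run is [4, -1, 2, 1].
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-1, -2, -3]) == -1  # all-negative: best single element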
16
1
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class _A ( __SCREAMING_SNAKE_CASE ): def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : List[Any] = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def __A ( self ) -> Any: '''simple docstring''' with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self ) -> Tuple: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): __UpperCAmelCase : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def __A ( self ) -> Tuple: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): __UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def __A ( self ) -> Union[str, Any]: '''simple docstring''' import PIL.Image __UpperCAmelCase : Union[str, Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCAmelCase ) as mock_cast_to_python_objects: __UpperCAmelCase : str = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) ) __UpperCAmelCase , __UpperCAmelCase : int = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , __UpperCAmelCase ) 
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        # non-hashable keys are rejected
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
            num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : str = pa.BufferOutputStream() with ArrowWriter( stream=lowerCAmelCase__ , writer_batch_size=lowerCAmelCase__ , hash_salt="""split_name""" , check_duplicates=lowerCAmelCase__ , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) __UpperCAmelCase , __UpperCAmelCase : str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ): """simple docstring""" __UpperCAmelCase : Dict = pa.BufferOutputStream() __UpperCAmelCase : Dict = pa.schema(lowerCAmelCase__ ) if fields else None with ArrowWriter(stream=lowerCAmelCase__ , schema=lowerCAmelCase__ , writer_batch_size=lowerCAmelCase__ ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) __UpperCAmelCase , __UpperCAmelCase : Any = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __UpperCAmelCase : Union[str, Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ): """simple docstring""" __UpperCAmelCase : str = pa.BufferOutputStream() __UpperCAmelCase : List[str] = pa.schema(lowerCAmelCase__ ) if fields else None with ArrowWriter(stream=lowerCAmelCase__ , schema=lowerCAmelCase__ , writer_batch_size=lowerCAmelCase__ ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __UpperCAmelCase : str = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowerCAmelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ): """simple docstring""" __UpperCAmelCase : Optional[Any] = pa.BufferOutputStream() __UpperCAmelCase : Union[str, Any] = pa.schema(lowerCAmelCase__ ) if fields else None with 
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
"""dataset-train.arrow""" ) try: with ArrowWriter(path=lowerCAmelCase__ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def lowercase_ ( lowerCAmelCase__ : str ): """simple docstring""" __UpperCAmelCase : int = """mock://dataset-train.arrow""" with ArrowWriter(path=lowerCAmelCase__ , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(lowerCAmelCase__ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) __UpperCAmelCase , __UpperCAmelCase : str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(lowerCAmelCase__ ) def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : List[Any] = pa.BufferOutputStream() with ParquetWriter(stream=lowerCAmelCase__ ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) __UpperCAmelCase , __UpperCAmelCase : Any = writer.finalize() assert num_examples == 2 assert num_bytes > 0 __UpperCAmelCase : Union[str, Any] = pa.BufferReader(output.getvalue() ) __UpperCAmelCase : pa.Table = pq.read_table(lowerCAmelCase__ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : Any ): """simple docstring""" import PIL.Image __UpperCAmelCase : Tuple = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowerCAmelCase__ , format="""png""" ) __UpperCAmelCase : List[str] = pa.BufferOutputStream() with ParquetWriter( stream=lowerCAmelCase__ , features=Features({"""image""": Image()} ) , embed_local_files=lowerCAmelCase__ ) as writer: writer.write({"""image""": image_path} ) writer.finalize() __UpperCAmelCase : Union[str, Any] = pa.BufferReader(output.getvalue() ) __UpperCAmelCase : pa.Table = pq.read_table(lowerCAmelCase__ ) __UpperCAmelCase : Tuple = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , lowerCAmelCase__ ) with open(lowerCAmelCase__ , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def lowercase_ ( ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=lowerCAmelCase__ )] ) __UpperCAmelCase : Optional[int] = pa.BufferOutputStream() with ArrowWriter(stream=lowerCAmelCase__ ) as writer: writer._build_writer(inferred_schema=lowerCAmelCase__ ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""A doubly linked list with explicit head and tail pointers."""


class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        # Position past the end: append at the tail.
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        # Splice the node out of the list, then detach its own pointers.
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    """Basic usage checks (the source's original doctests were not preserved;
    these are minimal replacements).

    >>> linked_list = LinkedList()
    >>> linked_list.is_empty()
    True
    >>> linked_list.insert(10)
    >>> linked_list.get_head_data()
    10
    >>> linked_list.get_tail_data()
    10
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
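A short usage sketch for the list above, assuming the classes are importable from this module; insert() appends at the tail once a head exists, and the class supports iteration and membership tests:

# Sketch, not part of the original file.
values = LinkedList()
for item in (1, 2, 3):
    values.insert(item)

assert str(values) == "1 2 3"
assert 2 in values                # __contains__
assert list(values) == [1, 2, 3]  # via LinkedListIterator

values.delete_value(2)
assert list(values) == [1, 3]
assert not values.is_empty()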