import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
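
# A minimal sketch of how the tester above can be driven by hand, outside the
# unittest machinery (assumption: `parent` is only needed for the assertion
# helpers, so it can be omitted when just building the tiny random model):
#
#   tester = LlamaModelTester(parent=None)
#   config, input_ids, *_ = tester.prepare_config_and_inputs()
#   model = LlamaModel(config).eval()
#   hidden = model(input_ids).last_hidden_state  # shape (13, 7, 32) with the defaults
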
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
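
# For reference, the rope_scaling dict exercised above is the same knob a user sets
# on a real checkpoint to stretch the usable context window (checkpoint name below
# is illustrative, not taken from this test file):
#
#   config = LlamaConfig.from_pretrained("meta-llama/Llama-2-7b-hf")
#   config.rope_scaling = {"type": "linear", "factor": 2.0}  # roughly double the trained context
#   model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", config=config)
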
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's frame "
            'of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
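
# Note on the generation call above: do_sample=False makes generate() fully greedy
# (argmax at every step), so the completion is deterministic and can be compared
# against a fixed expected string.
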
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
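
# The integration test above follows the usual pattern for pretrained checkpoints:
# run a fixed input through the real model and compare a small output slice against
# hard-coded reference values, with a loose tolerance (atol=1e-4) to absorb
# hardware-level floating point noise.
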
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
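
# Worked example for trim_batch (values are illustrative): with pad_token_id=0, the
# batch [[5, 6, 0], [7, 0, 0]] has a non-pad entry in columns 0 and 1 only, so the
# trimmed result is [[5, 6], [7, 0]].
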
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
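
# Typical hookup for the dataset above (directory name and lengths are illustrative):
#
#   from torch.utils.data import DataLoader
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=64, type_path="val")
#   loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)
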
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
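
# Example: normalize_answer("The  Cat's hat!") -> "cats hat"
# (lowercased, punctuation stripped, the article "the" removed, whitespace collapsed).
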
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
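
# Worked example: f1_score("the cat sat", "cat sat down") normalizes the prediction
# to ["cat", "sat"] and the reference to ["cat", "sat", "down"]; num_same = 2, so
# precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.
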
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
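
# Example of the remapping above: with hparams.dropout = 0.1 and a T5 config (which
# exposes `dropout_rate` rather than `dropout`), the loop sets
# config.dropout_rate = 0.1 and then deletes `dropout` from hparams.
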
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
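
# A minimal usage sketch, mirroring the docstring example above (assumes the
# mauve-text and faiss packages are installed; the 1.0 score for identical texts is
# taken from the docstring, not re-computed here):
#
#   import datasets
#   mauve = datasets.load_metric("mauve")
#   out = mauve.compute(predictions=["hello there"], references=["hello there"])
#   print(out.mauve)  # 1.0
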
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
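
# Minimal sketch of the processor pattern these tests exercise, given a PIL `image`
# (the checkpoint name is illustrative, not taken from this test file):
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(text="a photo of", images=image, return_tensors="pt")
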
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
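
# Usage sketch (the script and directory names are illustrative): keep the first 100
# lines of every file under data/full and write the truncated copies to data/small:
#
#   python minify_dataset.py data/full data/small 100
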
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve Ax = b for x using Gaussian elimination and back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    col: int
    row2: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and append the right-hand side as an extra column
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
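
# Worked example: solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]]) returns
# [[1.0], [3.0]], i.e. x = 1, y = 3 satisfies 2x + y = 5 and x + 3y = 10.
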
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit the lowest-degree polynomial through the points (1, y[0]), (2, y[1]), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
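
# Example: interpolate([1, 8, 27]) fits the quadratic 6x^2 - 11x + 6 through
# (1, 1), (2, 8), (3, 27); it reproduces those points but diverges from x^3
# afterwards, giving 58 instead of 64 at x = 4, which is exactly the "first
# incorrect term" behaviour the solution below sums up.
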
def lowerCAmelCase( __lowerCamelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase( __lowerCamelCase = question_function , __lowerCamelCase = 10 ):
__a = [func(__lowerCamelCase ) for x_val in range(1 , order + 1 )]
__a = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__a = 0
__a = 42
__a = 42
for poly in polynomials:
__a = 1
while func(__lowerCamelCase ) == poly(__lowerCamelCase ):
x_val += 1
ret += poly(__lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
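# Quick self-checks for the helpers above (pure Python, no dependencies).
# The 2x2 system x + y = 3, x - y = 1 has the solution x = 2, y = 1, and the
# optimum polynomial through (1, 1) and (2, 8) is the line 7n - 6.
def _demo_checks() -> None:
    assert solve([[1, 1], [1, -1]], [[3], [1]]) == [[2.0], [1.0]]
    op = interpolate([1, 8])
    assert [op(n) for n in (1, 2, 3)] == [1, 8, 15]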
| 197 | from ..utils import DummyObject, requires_backends
class a__ ( metaclass=DummyObject ):
A__ : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=DummyObject ):
A__ : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=DummyObject ):
A__ : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=DummyObject ):
A__ : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=DummyObject ):
A__ : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> str:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=DummyObject ):
A__ : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
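# How these placeholders behave at runtime: the DummyObject metaclass turns
# any attempt to instantiate the class into an ImportError naming the missing
# backends, instead of failing later with an opaque AttributeError. A minimal
# standalone sketch of the same pattern (illustrative only, not the actual
# diffusers implementation):
class _RequiresBackendsMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends: {cls._backends}")

class _FakeOnnxPipeline(metaclass=_RequiresBackendsMeta):
    _backends = ['torch', 'transformers', 'onnx']

# _FakeOnnxPipeline()  # raises ImportError listing the missing backends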
| 197 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
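# The block above registers a lazily-imported module: heavy submodules are
# only imported when one of their attributes is first accessed. A toy sketch
# of the core idea (illustrative only; the real _LazyModule also handles
# TYPE_CHECKING, module specs and error reporting):
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)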
| 274 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    '''simple docstring'''

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    '''simple docstring'''

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    '''simple docstring'''

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(
        self, fwd_node: Node, bwd_node: Node
    ) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
A : Optional[int] = (0, 0)
A : int = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Dict = time.time()
A : Optional[Any] = AStar(init, goal)
A : Optional[int] = a_star.search()
A : Optional[int] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
A : Dict = time.time()
A : Tuple = BidirectionalAStar(init, goal)
A : List[Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
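# Worked example of the two heuristics: for a node at (y=2, x=1) targeting
# (y=6, x=6), Manhattan gives |1 - 6| + |2 - 6| = 9 while Euclidean gives
# sqrt(5**2 + 4**2) ~= 6.40. Both are admissible on this 4-connected grid.
if __name__ == "__main__":
    demo_node = Node(pos_x=1, pos_y=2, goal_x=6, goal_y=6, g_cost=0, parent=None)
    # With HEURISTIC == 0 (euclidean) this prints 6.40.
    print(f"demo h-cost = {demo_node.calculate_heuristic():.2f}")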
| 274 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    # By default the progress bar is only rendered on the main process;
    # every other process gets a disabled (silent) bar.
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
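# Usage sketch for the wrapper above (illustrative; requires a configured
# accelerate PartialState). The first positional argument is
# main_process_only, everything else is forwarded to tqdm.tqdm, so on rank 0
# the bar renders and on every other rank it is silently disabled:
#
#     for step in tqdm(True, range(1000), desc="steps"):
#         ...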
| 1 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : int = 16
A_ : str = 32
def bamb(x: int) -> int:
    # Converts a byte count to whole mebibytes (2**20 bytes).
    return int(x / 2**20)
class TorchTracemalloc:
    '''simple docstring'''

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
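# Small self-checks for the byte -> MB helper used throughout this script
# (not part of the original training flow): bamb floors to whole mebibytes.
def _bamb_examples() -> None:
    assert bamb(2**20) == 1
    assert bamb(3 * 2**20 + 123) == 3
    assert bamb(8 * 2**30) == 8192  # 8 GiB -> 8192 MB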
| 192 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A_ : List[Any] = logging.get_logger(__name__)
class Conversation:
'''simple docstring'''
def __init__( self , A__ = None , A__ = None , A__=None , A__=None ):
if not conversation_id:
            A__ : List[Any] = uuid.uuid4()
if past_user_inputs is None:
A__ : Dict = []
if generated_responses is None:
A__ : int = []
A__ : uuid.UUID = conversation_id
A__ : List[str] = past_user_inputs
A__ : List[str] = generated_responses
A__ : Optional[str] = text
def __eq__( self , A__ ):
if not isinstance(A__ , A__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __A ( self , A__ , A__ = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
A__ : str = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
A__ : Tuple = text
def __A ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
A__ : Tuple = None
def __A ( self , A__ ):
self.generated_responses.append(A__ )
def __A ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
A__ : Optional[Any] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
A__ : str = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline(Pipeline ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
super().__init__(*A__ , **A__ )
if self.tokenizer.pad_token_id is None:
A__ : Tuple = self.tokenizer.eos_token
def __A ( self , A__=None , A__=None , A__=None , **A__ ):
A__ : Tuple = {}
A__ : List[str] = {}
A__ : Union[str, Any] = {}
if min_length_for_response is not None:
A__ : str = min_length_for_response
if minimum_tokens is not None:
A__ : List[str] = minimum_tokens
if "max_length" in generate_kwargs:
A__ : List[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
A__ : Optional[int] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A__ , A__=0 , **A__ ):
A__ : Optional[Any] = super().__call__(A__ , num_workers=A__ , **A__ )
if isinstance(A__ , A__ ) and len(A__ ) == 1:
return outputs[0]
return outputs
def __A ( self , A__ , A__=32 ):
        if not isinstance(A__ , Conversation ):
            raise ValueError("""ConversationalPipeline expects Conversation objects as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
A__ : List[str] = self.tokenizer._build_conversation_input_ids(A__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
A__ : Tuple = self._legacy_parse_and_tokenize(A__ )
if self.framework == "pt":
A__ : List[str] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
A__ : Optional[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __A ( self , A__ , A__=10 , **A__ ):
A__ : List[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
A__ : Optional[int] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
A__ : Dict = max_length - minimum_tokens
A__ : Optional[int] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
A__ : str = model_inputs["""attention_mask"""][:, -trim:]
A__ : List[str] = model_inputs.pop("""conversation""" )
A__ : Dict = max_length
A__ : str = self.model.generate(**A__ , **A__ )
if self.model.config.is_encoder_decoder:
A__ : Union[str, Any] = 1
else:
A__ : Optional[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __A ( self , A__ , A__=True ):
A__ : Dict = model_outputs["""output_ids"""]
A__ : str = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ , )
A__ : Optional[int] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(A__ )
return conversation
def __A ( self , A__ ):
A__ : str = self.tokenizer.eos_token_id
A__ : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A__ , add_special_tokens=A__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A__ , add_special_tokens=A__ ) )
if len(A__ ) > self.tokenizer.model_max_length:
A__ : str = input_ids[-self.tokenizer.model_max_length :]
return input_ids
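# Usage sketch for the pipeline above (illustrative only; assumes network
# access and a conversational checkpoint such as "microsoft/DialoGPT-small"):
#
#     from transformers import pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])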
| 192 | 1 |
A : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : int = [{"type": "code", "content": INSTALL_CONTENT}]
A : List[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 305 | from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    '''simple docstring'''
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    '''simple docstring'''
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    '''simple docstring'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    '''simple docstring'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    '''simple docstring'''
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    '''simple docstring'''
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("""Actual output value:""", output(i, """test""")))
        print(("""Hypothesis output:""", calculate_hypothesis_value(i, """test""")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
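# Worked example for the hypothesis above (valid against the module's initial
# parameter_vector [2, 4, 1, 5], i.e. before run_gradient_descent mutates it):
# on the first training input (5, 2, 3) the hypothesis is
# 2 + 4*5 + 1*2 + 5*3 = 39, giving an initial error of 39 - 15 = 24.
def _hypothesis_example() -> None:
    assert _hypothesis_value((5, 2, 3)) == 39
    assert _error(0) == 24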
| 84 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __lowerCAmelCase ( self ) -> str:
        lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
lowerCAmelCase_ :int = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :List[Any] = jax.device_count()
lowerCAmelCase_ :Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Optional[Any] = replicate(__A )
lowerCAmelCase_ :Union[str, Any] = shard(__A )
lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2"""
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloat16 , )
lowerCAmelCase_ :Optional[int] = scheduler_params
lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Tuple = jax.device_count()
lowerCAmelCase_ :str = num_samples * [prompt]
lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Tuple = replicate(__A )
lowerCAmelCase_ :Optional[int] = shard(__A )
lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """simple docstring"""

    def __init__(self, data) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None


def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("build_tree should have returned from inside the loop")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=',')
def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
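# Non-interactive demo (an alternative to build_tree's stdin prompts): build
# the three-node tree 1 -> (2, 3) directly and traverse it.
def _demo_fixed_tree() -> None:
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    pre_order(root)   # prints 1,2,3,
    print()
    in_order(root)    # prints 2,1,3,
    print()
    post_order(root)  # prints 2,3,1,
    print()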
| 353 | """simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 321 | 0 |
def solution(limit: int = 50000000) -> int:
    """
    Count the integers below limit expressible as the sum of a prime square,
    a prime cube and a prime fourth power (Project Euler style sieve).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
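# Sanity check from the problem statement: below 50 exactly four numbers
# (28 = 2**2 + 2**3 + 2**4, 33, 47 and 49) are expressible as a prime square
# plus a prime cube plus a prime fourth power, so solution(50) == 4.
def _check_small_limit() -> None:
    assert solution(50) == 4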
| 94 |
def solution(limit: int = 1000000) -> int:
    """
    Sieve Euler's totient for 2 <= n <= limit and return int(sum(phi(n))).
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 94 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # There must be exactly one <mask> token in the input.
    assert masked_input.count("""<mask>""") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = """ """.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """)):
        predicted_token = predicted_token_bpe.replace("""\u2581""", """ """)
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(""" {0}""".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 359 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : Union[str, Any] = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
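# Usage sketch for the auto classes defined above (assumes network access and
# a Flax-compatible checkpoint such as "bert-base-cased"; illustrative only):
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
#     classifier = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
#
# Each call looks the config type up in the corresponding _LazyAutoMapping and
# dispatches to the matching Flax class (FlaxBertModel /
# FlaxBertForSequenceClassification here).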
| 141 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
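# Runtime behaviour of the lazy-module pattern above (illustrative):
#   from transformers.models.lxmert import LxmertModel  # resolves modeling_lxmert on first access
# Swapping the module in sys.modules for a _LazyModule defers the heavy torch/TF imports
# until an attribute is actually used, keeping `import transformers` fast.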
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
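# Illustrative use of these constants elsewhere in the library:
#   weights_path = os.path.join(model_dir, WEIGHTS_NAME)  # .../diffusion_pytorch_model.bin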
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self : str ):
'''simple docstring'''
return 12
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 12
@property
def A ( self : Dict ):
'''simple docstring'''
return 32
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase )
@property
def A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = 12
_snake_case = 12
_snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
_snake_case = TransformeraDModel(**lowercase )
return model
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.dummy_vqvae
_snake_case = self.dummy_text_encoder
_snake_case = self.dummy_tokenizer
_snake_case = self.dummy_transformer
_snake_case = VQDiffusionScheduler(self.num_embed )
_snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase )
_snake_case = VQDiffusionPipeline(
vqvae=lowercase , text_encoder=lowercase , tokenizer=lowercase , transformer=lowercase , scheduler=lowercase , learned_classifier_free_sampling_embeddings=lowercase , )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = 'teddy bear playing in the pool'
_snake_case = torch.Generator(device=lowercase ).manual_seed(0 )
_snake_case = pipe([prompt] , generator=lowercase , num_inference_steps=2 , output_type='np' )
_snake_case = output.images
_snake_case = torch.Generator(device=lowercase ).manual_seed(0 )
_snake_case = pipe(
[prompt] , generator=lowercase , output_type='np' , return_dict=lowercase , num_inference_steps=2 )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_snake_case = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.dummy_vqvae
_snake_case = self.dummy_text_encoder
_snake_case = self.dummy_tokenizer
_snake_case = self.dummy_transformer
_snake_case = VQDiffusionScheduler(self.num_embed )
_snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_snake_case = VQDiffusionPipeline(
vqvae=lowercase , text_encoder=lowercase , tokenizer=lowercase , transformer=lowercase , scheduler=lowercase , learned_classifier_free_sampling_embeddings=lowercase , )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = 'teddy bear playing in the pool'
_snake_case = torch.Generator(device=lowercase ).manual_seed(0 )
_snake_case = pipe([prompt] , generator=lowercase , num_inference_steps=2 , output_type='np' )
_snake_case = output.images
_snake_case = torch.Generator(device=lowercase ).manual_seed(0 )
_snake_case = pipe(
[prompt] , generator=lowercase , output_type='np' , return_dict=lowercase , num_inference_steps=2 )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_snake_case = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
_snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
_snake_case = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_snake_case = torch.Generator(device=lowercase ).manual_seed(0 )
_snake_case = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowercase , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0 | 367 |
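# To run just this module's fast tests in a diffusers checkout (path illustrative):
#   python -m pytest tests/pipelines/vq_diffusion/test_vq_diffusion.py::VQDiffusionPipelineFastTests -q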
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
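# Each line of the original entity vocab file is JSON shaped like (values illustrative):
#   {"id": 3, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
# so a single canonical id can surface under several "<language>:<name>" keys above.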
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_lowerCamelCase : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 130 | 0 |
from manim import *
class BigModelInferenceScene(Scene):  # NOTE: the scene class name is illustrative; Manim only requires a Scene subclass
    def construct(self):
__UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__UpperCAmelCase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 )
__UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : int = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : List[str] = Text('''CPU''' , font_size=24 )
__UpperCAmelCase : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
__UpperCAmelCase : List[str] = [mem.copy() for i in range(4 )]
__UpperCAmelCase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : Tuple = Text('''GPU''' , font_size=24 )
__UpperCAmelCase : Optional[int] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : int = Text('''Model''' , font_size=24 )
__UpperCAmelCase : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[Any] = []
for i, rect in enumerate(a_ ):
__UpperCAmelCase : Union[str, Any] = fill.copy().set_fill(a_ , opacity=0.8 )
target.move_to(a_ )
model_arr.append(a_ )
__UpperCAmelCase : Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
__UpperCAmelCase : str = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : int = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
__UpperCAmelCase : List[str] = Text('''Disk''' , font_size=24 )
__UpperCAmelCase : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(a_ , a_ )
__UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : Optional[int] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
__UpperCAmelCase : List[Any] = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
__UpperCAmelCase : List[str] = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ ) )
__UpperCAmelCase : Union[str, Any] = Square(0.3 )
input.set_fill(a_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a_ , buff=0.5 )
self.play(Write(a_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a_ , buff=0.0_2 )
self.play(MoveToTarget(a_ ) )
self.play(FadeOut(a_ ) )
__UpperCAmelCase : List[str] = Arrow(start=a_ , end=a_ , color=a_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , a_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase : Optional[int] = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
__UpperCAmelCase : Tuple = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(a_ ) , Circumscribe(model_arr[0] , color=a_ , **a_ ) , Circumscribe(model_cpu_arr[0] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , a_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__UpperCAmelCase : Optional[Any] = AnimationGroup(
FadeOut(a_ , run_time=0.5 ) , MoveToTarget(a_ , run_time=0.5 ) , FadeIn(a_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase : Dict = 0.7
self.play(
Circumscribe(model_arr[i] , **a_ ) , Circumscribe(cpu_left_col_base[i] , **a_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , Circumscribe(model_arr[i + 1] , color=a_ , **a_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a_ , **a_ ) , Circumscribe(cpu_left_col_base[-1] , color=a_ , **a_ ) , Circumscribe(gpu_rect[0] , color=a_ , **a_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase : Optional[Any] = a_c
__UpperCAmelCase : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(a_ ) , FadeOut(a_ , run_time=0.5 ) , )
__UpperCAmelCase : Tuple = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) , MoveToTarget(a_ ) )
self.wait()
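# Taken together, the scene animates weights shuttling between disk, CPU and GPU while an
# input flows layer by layer — the offloaded big-model inference flow that the on-screen
# captions in the code above describe.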
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
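# Sketch of how the ONNX config above is typically consumed (export details vary across
# transformers versions; treat this as illustrative):
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config, task="sequence-classification")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)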
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.model_type == "bert":
SCREAMING_SNAKE_CASE : Tuple = BertForMaskedLM.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
SCREAMING_SNAKE_CASE : Dict = model.state_dict()
SCREAMING_SNAKE_CASE : str = {}
for w in ["word_embeddings", "position_embeddings"]:
SCREAMING_SNAKE_CASE : int = state_dict[F'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : List[str] = state_dict[F'{prefix}.embeddings.LayerNorm.{w}']
SCREAMING_SNAKE_CASE : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Dict = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
SCREAMING_SNAKE_CASE : Tuple = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
SCREAMING_SNAKE_CASE : Any = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
SCREAMING_SNAKE_CASE : List[str] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
SCREAMING_SNAKE_CASE : List[str] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
SCREAMING_SNAKE_CASE : Dict = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
SCREAMING_SNAKE_CASE : Tuple = state_dict['''cls.predictions.decoder.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Any = state_dict[F'cls.predictions.transform.dense.{w}']
SCREAMING_SNAKE_CASE : int = state_dict[F'cls.predictions.transform.LayerNorm.{w}']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint) | 369 |
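# Note: the six teacher layers [0, 2, 4, 7, 9, 11] land in student layers 0..5, so
# `std_idx` ends at 6 — exactly the depth of DistilBERT.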
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 317 | 0 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Union[str, Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : Optional[Any] = {
"allenai/led-base-16384": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]="replace" , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : Tuple="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : List[Any]="<mask>" , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[Any]=True , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type"""))
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**lowerCAmelCase_)
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value
lowercase_ = value
def _UpperCAmelCase ( self : Tuple , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any]=None):
"""simple docstring"""
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = super()._pad(
encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_)
if needs_to_be_padded:
lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase_ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowercase_ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
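# Minimal usage sketch:
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("a very long document ...", return_tensors="pt")
# A "global_attention_mask" supplied with the inputs is kept aligned with the padded
# length by the custom `_pad` above (padded with -1, since 0 already means "local attention").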
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCAmelCase : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "sequence-classification"
def __init__( self : Optional[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if type(lowerCAmelCase_) == dict:
lowercase_ = Namespace(**lowerCAmelCase_)
lowercase_ = glue_output_modes[hparams.task]
lowercase_ = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , self.mode)
def _UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.model(**lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ = outputs[0]
lowercase_ = self.trainer.lr_schedulers[0]["""scheduler"""]
lowercase_ = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.hparams
lowercase_ = processors[args.task]()
lowercase_ = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ = self._feature_file(lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowercase_ = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowercase_ = convert_examples_to_features(
lowerCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_)
torch.save(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False):
"""simple docstring"""
lowercase_ = """dev""" if mode == """test""" else mode
lowercase_ = self._feature_file(lowerCAmelCase_)
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
lowercase_ = torch.load(lowerCAmelCase_)
lowercase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ , lowercase_ = outputs[:2]
lowercase_ = logits.detach().cpu().numpy()
lowercase_ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowercase_ = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowercase_ = np.argmax(lowerCAmelCase_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = np.squeeze(lowerCAmelCase_)
lowercase_ = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase_ , lowerCAmelCase_)}
lowercase_ = dict(results.items())
lowercase_ = results
return ret, preds_list, out_label_list
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : list):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_)
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=lowerCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
add_generic_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = GLUETransformer.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
lowercase_ = GLUETransformer(__lowerCAmelCase )
lowercase_ = generic_train(__lowerCAmelCase , __lowerCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__lowerCAmelCase ) )
lowercase_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCAmelCase )
if __name__ == "__main__":
main()
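# Example invocation (generic flags come from lightning_base's add_generic_args; values illustrative):
#   python glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir ./glue_data/MRPC --output_dir ./results --do_train --do_predict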
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math module's value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
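# Quick usage check (the result is stochastic; the value shown is the expectation):
#   area_under_curve_estimator(100_000, lambda x: x * x)  # ≈ 1/3, i.e. the integral of x^2 over [0, 1]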
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data"
    )
    parser.add_argument("--evaluation_set", type=str, help="where to store parsed evaluation_set file")
    parser.add_argument("--gold_data_path", type=str, help="where to store parsed gold_data_path file")
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
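# Each DPR record in the source file is JSON shaped roughly like (illustrative):
#   {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}], ...}
# Only the question and the titles of its positive contexts are written out.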
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 136 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        # Intentionally a no-op in the original; the method name here is a
        # reconstruction, since the obfuscated source does not preserve it.
        pass
| 153 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive square-and-multiply modular exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
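
# Added sanity check (illustrative; not part of the original file): for any
# exponent >= 1, _modexpt should agree with Python's built-in three-argument
# pow(). The helper name `_self_test` is invented here; call it manually.
def _self_test() -> None:
    for base in (2, 3, 1_777):
        for exponent in range(1, 25):
            assert _modexpt(base, exponent, 10**8) == pow(base, exponent, 10**8)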
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Compute the last `digits` digits of the power tower base^base^...^base
    of the given height (hyperexponentiation)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 153 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    ]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    ]
    assert sorted(result) == sorted(expected)
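    # Note (added): kruskal() may return the MST edges in any order, which is
    # why the comparison above sorts both lists instead of matching sequences.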
| 257 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
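
# Illustrative usage (added; not part of the original file): constructing the
# config with no arguments applies the Swin-backbone and DETR-decoder
# fallbacks above, after which derived fields mirror the decoder config.
#
#     config = MaskFormerConfig()
#     assert config.decoder_config.model_type == "detr"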
| 44 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)

        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        pass
| 222 | 0 |
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Benchmark the three pangram checks against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 371 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 192 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number, with catalan(1) == catalan(2) == 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
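    # Added check: the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1) gives
    # 1, 1, 2, 5, 14 for the first five inputs.
    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]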
| 311 |
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path from source to sink."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]
def ford_fulkerson(graph, source, sink) -> int:
    """Return the maximum flow from source to sink in the given capacity matrix."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        # Update residual capacities along the augmenting path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
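
# Note (added): because augmenting paths are found with BFS, this is the
# Edmonds-Karp variant of Ford-Fulkerson, which runs in O(V * E^2) and, for
# the integer capacities below, always terminates with the exact max flow.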
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 337 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) == gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 56 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 210 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 210 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
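
# Illustrative usage (added; not part of the original file — the checkpoint id
# is an example): text queries go through the tokenizer, images through the
# image processor, and both land in a single BatchEncoding.
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values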
| 167 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
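    # Added check: first five values of n * (2n - 1) for n = 0..4.
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]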
| 167 | 1 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_image_type_cast(self):
        # Name reconstructed: checks that casting an Image-typed sequence
        # calls cast_to_python_objects with optimize_list_casting disabled.
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
A_ = pa.BufferOutputStream()
A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1}, key=[1, 2] )
A_ , A_ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""", [None, 2, 10] )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1}, key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2}, key=10 )
A_ , A_ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""", [None, 2, 10] )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1}, key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2}, key=2 )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
A_ = pa.BufferOutputStream()
A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = pa.BufferOutputStream()
A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = pa.BufferOutputStream()
A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""", [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
A_ = pa.array(TypedSequence(_SCREAMING_SNAKE_CASE, optimized_int_type=_SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("""raise_exception""", [False, True] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
A_ = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_SCREAMING_SNAKE_CASE ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    stream = pa.BufferOutputStream()
    with ArrowWriter(stream=stream) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
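
# A minimal round-trip sketch (added, not part of the original test suite) of
# what the tests above exercise, assuming the same `datasets` internals
# (ArrowWriter emits the Arrow IPC stream format):
#
#     stream = pa.BufferOutputStream()
#     with ArrowWriter(stream=stream) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         num_examples, num_bytes = writer.finalize()
#     table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()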
| 162 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE )
# 2) Step through text searching for pattern
snake_case_ , snake_case_ = 0, 0 # index into text, pattern
while i < len(_SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(_SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case_ = failure[j - 1]
continue
i += 1
return False
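
# Example (added for illustration): kmp("abc", "xxabcxx") returns True, while
# kmp("abc", "xxabxcx") returns False, since no full "abc" match is ever found.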
def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of ``pattern``, record the length of the longest proper
    prefix that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] , _lowercase : list[int] , _lowercase : int ) ->list[int]:
'''simple docstring'''
a : int = [0] * no_of_processes
a : Union[str, Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowercase ):
a : int = burst_time[i]
a : str = 0
a : List[str] = 0
a : Union[str, Any] = 9_9999_9999
a : Any = 0
a : Any = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
a : str = remaining_time[j]
a : List[Any] = j
a : Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
a : List[str] = remaining_time[short]
if minm == 0:
a : int = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
a : int = False
# Find finish time of current process
a : int = increment_time + 1
# Calculate waiting time
a : str = finish_time - arrival_time[short]
a : Union[str, Any] = finar - burst_time[short]
if waiting_time[short] < 0:
a : Tuple = 0
# Increment time
increment_time += 1
return waiting_time
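
# A hand-traced sanity check (added, not in the original file): with arrival
# times [0, 1, 2] and burst times [3, 1, 2], the scheduler runs P1 for one
# tick, preempts it for the shorter P2, resumes and finishes P1, then runs P3:
#
#     >>> calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
#     [1, 0, 2]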
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
a : int = int(input())
a : Any = [0] * no_of_processes
a : Tuple = [0] * no_of_processes
a : str = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a : Optional[Any] = map(int, input().split())
a : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a : List[Any] = burst_time
a : Optional[int] = no_of_processes
a : List[Any] = waiting_time
a : Dict = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a : Tuple = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 366 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 79 | 0 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Create a state space tree and traverse it depth-first, printing one
    complete permutation each time the recursion bottoms out (see the note
    below)."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
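
# Note (added): each recursion level fixes one position and undoes its choice
# on the way back up, so the traversal visits every ordering exactly once and
# prints n! permutations for an n-element sequence, in O(n * n!) time overall.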
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 35 |
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask: 0 for pixels to inpaint, 1 for pixels to keep
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
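
# A minimal usage sketch (added; the checkpoint name and input loading are
# assumptions, not part of this file). RePaint inpaints the masked region
# while repeatedly re-noising to stay consistent with the known pixels:
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     out = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
#     out.images[0].save("inpainted.png")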
| 151 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of the scheduler's `step` function: the sample at the next
    timestep and, when available, the predicted denoised sample."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
) -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
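
# Quick intuition (added): each beta_i is 1 - alpha_bar(t2) / alpha_bar(t1).
# With the cosine transform, alpha_bar decays slowly near t=0 and very fast in
# relative terms near t=1, so the betas start tiny and grow toward `max_beta`
# late in the schedule.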
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
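
# A minimal inversion-loop sketch (added; `unet` and `clean_latents` are
# placeholders, not defined in this file). Because the timesteps ascend, each
# `step` maps x_t -> x_{t+1}, gradually noising a clean latent:
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     latents = clean_latents
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample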
| 67 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Calculate XNOR of two binary inputs: returns 1 when both inputs are
    equal (0, 0 or 1, 1) and 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE__ = 1.6021E-19 # units = C
def lowerCAmelCase__ ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
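
# A quick hand check (added, value approximate): solving for mobility with
# conductivity=25.0 and electron_conc=100.0 gives
# 25 / (100 * 1.6021e-19) ≈ 1.56e18, i.e. ("mobility", 1.56e18).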
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 150 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 354 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
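
# Example (added sketch): other modules can call dep_version_check("tokenizers")
# to enforce the pin recorded in `deps`; require_version raises if the installed
# version does not satisfy it.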
| 321 | 0 |
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def __UpperCAmelCase ( a_: Dict, a_: Union[str, Any], a_: Optional[int] ):
_UpperCAmelCase : List[str] = tmp_path / "cache"
_UpperCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : List[Any] = ParquetDatasetReader(__snake_case, cache_dir=__snake_case, keep_in_memory=__snake_case ).read()
_check_parquet_dataset(__snake_case, __snake_case )
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def __UpperCAmelCase ( a_: Tuple, a_: List[str], a_: Any ):
_UpperCAmelCase : int = tmp_path / "cache"
_UpperCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase : Any = features.copy() if features else default_expected_features
_UpperCAmelCase : int = (
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Optional[Any] = ParquetDatasetReader(__snake_case, features=__snake_case, cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case, __snake_case )
@pytest.mark.parametrize("split", [None, NamedSplit("train" ), "train", "test"] )
def __UpperCAmelCase ( a_: Union[str, Any], a_: str, a_: str ):
_UpperCAmelCase : Dict = tmp_path / "cache"
_UpperCAmelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase : str = ParquetDatasetReader(__snake_case, cache_dir=__snake_case, split=__snake_case ).read()
_check_parquet_dataset(__snake_case, __snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list] )
def __UpperCAmelCase ( a_: str, a_: Union[str, Any], a_: List[str] ):
if issubclass(__snake_case, __snake_case ):
_UpperCAmelCase : Optional[Any] = parquet_path
elif issubclass(__snake_case, __snake_case ):
_UpperCAmelCase : Optional[int] = [parquet_path]
_UpperCAmelCase : str = tmp_path / "cache"
_UpperCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase : Dict = ParquetDatasetReader(__snake_case, cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case, __snake_case )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 145 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
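
# A quick usage sketch (added; values illustrative, not defaults from this
# file): the config follows the Wav2Vec2-style hyperparameter layout, so a
# smaller variant is just
#
#     config = WavLMConfig(hidden_size=256, num_hidden_layers=6, num_attention_heads=4)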
| 5 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 153 | 0 |
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment (terminal flag and info are unused here)
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(f'Total reward: {total_reward}')
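    # Post-run sketch (illustrative, not in the original script): the collected
    # rollout can be stacked for rendering or analysis, e.g.
    #     import numpy as np
    #     states = np.stack(rollout)  # shape (steps + 1, obs_dim)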
| 104 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
    state_dict = model.state_dict()
    compressed_sd = {}
    # NOTE: the student-side key names below follow the standard DistilBERT
    # state-dict layout; the original destination keys were garbled, so they
    # are assumed.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
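# Sanity-check sketch (illustrative; not part of the original script): the dumped
# state dict is meant to warm-start a DistilBERT-style student, e.g.
#     from transformers import DistilBertForMaskedLM
#     student = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
#     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)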
| 251 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
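    # Example output (illustrative, assuming a hypothetical tree with maths/prime_check.py):
    #
    #     ## Maths
    #       * [Prime Check](maths/prime_check.py)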
| 108 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
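# Example (illustrative): find_backend('if not is_torch_available():\n') returns
# "torch"; if a guard line mentions several is_xxx_available() checks, the backend
# names are sorted and joined with "_and_". Lines that are not such guards return None.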
def parse_init(init_file):
    """Collect, per backend, the objects declared in _import_structure and in TYPE_CHECKING."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and list any mismatches."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the source tree and validate every __init__.py."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """List every submodule of transformers found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 108 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 193 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` with a freshly generated pseudo-random key."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: (c - k**2) / k recovers each original code point."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
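# Round-trip property (illustrative): decryption inverts encryption, i.e.
#     c, k = Onepad().encrypt("any text")
#     assert Onepad().decrypt(c, k) == "any text"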
| 304 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # NOTE: the four ConvBERT-specific attribute names below (embedding_size,
        # head_ratio, conv_kernel_size, num_groups) are assumed; the originals were lost.
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
__a = TFConvBertModel(config=UpperCAmelCase )
__a = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__a = [input_ids, input_mask]
__a = model(UpperCAmelCase )
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
__a = TFConvBertForMaskedLM(config=UpperCAmelCase )
__a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
__a = self.num_labels
__a = TFConvBertForSequenceClassification(config=UpperCAmelCase )
__a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
__a = self.num_choices
__a = TFConvBertForMultipleChoice(config=UpperCAmelCase )
__a = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__a = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
__a = self.num_labels
__a = TFConvBertForTokenClassification(config=UpperCAmelCase )
__a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
__a = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
__a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__a = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False  # assumed attribute names below; the originals were lost
    test_head_masking = False
    test_onnx = False
    def setUp(self) -> None:
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = True
if hasattr(UpperCAmelCase , 'use_cache' ):
__a = True
__a = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__a = getattr(self.model_tester , 'key_length' , UpperCAmelCase )
for model_class in self.all_model_classes:
__a = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
__a = model_class(UpperCAmelCase )
__a = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
__a = os.path.join(UpperCAmelCase , 'saved_model' , '1' )
__a = tf.keras.models.load_model(UpperCAmelCase )
__a = model(UpperCAmelCase )
if self.is_encoder_decoder:
__a = outputs['encoder_hidden_states']
__a = outputs['encoder_attentions']
else:
__a = outputs['hidden_states']
__a = outputs['attentions']
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
__a = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__a = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__a = getattr(self.model_tester , 'key_length' , UpperCAmelCase )
__a = getattr(self.model_tester , 'key_length' , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase ):
__a = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__a = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase ):
__a = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__a = True
__a = False
__a = model_class(UpperCAmelCase )
__a = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__a = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
__a = model_class(UpperCAmelCase )
__a = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(UpperCAmelCase )
__a = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(UpperCAmelCase )
__a = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm(self) -> None:
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
| 197 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
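# Usage sketch (illustrative; the checkpoint name is an assumption):
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector("street.jpg", candidate_labels=["car", "bicycle", "person"])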
| 197 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big- to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of a non-negative int."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append its bit length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise complement of a non-negative int, truncated to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32-bit representation of `i` left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
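# Quick sanity examples for the 32-bit helpers (illustrative):
#     left_rotate_32(1, 31) == 2**31 and left_rotate_32(2**31, 1) == 1  (wrap-around)
#     sum_32(2**32 - 1, 1) == 0  (modular addition)
#     not_32(0) == 2**32 - 1  (bitwise complement within 32 bits)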
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-character hex byte string."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    # Per-round left-rotation amounts
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
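# Verification sketch (illustrative): the digest should agree with hashlib, e.g.
#     import hashlib
#     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")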
| 72 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True  # assumed attribute name; the original was lost
    def setUp(self) -> None:
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
"""simple docstring"""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token )
    def test_get_vocab(self) -> None:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '[MASK]' )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size(self) -> None:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers(self) -> None:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self) -> None:
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self ) -> BigBirdTokenizer:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
    def test_tokenization_base_easy_symbols(self) -> None:
"""simple docstring"""
        txt = 'Hello World!'
        tokens = [65, 1_8536, 2260, 101, 66]
        self.assertListEqual(tokens , self.big_tokenizer.encode(txt ) )
@slow
    def test_tokenization_base_hard_symbols(self) -> None:
"""simple docstring"""
        txt = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
        tokens = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
        self.assertListEqual(tokens , self.big_tokenizer.encode(txt ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self) -> None:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BigBirdConfig(attention_type='original_full' )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens(self) -> None:
"""simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
        decoded_text = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
    def test_tokenizer_integration(self) -> None:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ={'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 199 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=33 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : List[Any]=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[Any]=5_12 , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Tuple=None , ) -> int:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def a_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
A__ = EsmModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
A__ = EsmForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
A__ = self.num_labels
A__ = EsmForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self : Any ) -> Dict:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False  # assumed attribute name; the original was lost
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Any = True
    def setUp(self) -> None:
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def a_ ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = EsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def a_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
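# positions produced from input ids are offset by padding_idx + 1, and the pad position itself keeps padding_idx (see the expected tensor below)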
A__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
A__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
A__ = create_position_ids_from_input_ids(__lowerCAmelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
def a_ ( self : List[Any] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
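# with inputs_embeds there are no token ids to inspect, so every position is assumed non-padding and numbered sequentially from padding_idx + 1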
A__ = torch.empty(2 , 4 , 30 )
A__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
A__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
A__ = embeddings.create_position_ids_from_inputs_embeds(__lowerCAmelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch
class A (unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self : int ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
A__ = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = model(__lowerCAmelCase )[0]
A__ = 33
A__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def a_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
A__ = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A__ = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 276 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A :
'''simple docstring'''
config_cls = BlenderbotSmallConfig
config_updates = {}
hidden_act = '''gelu'''
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=99 , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=20 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : int=0 , ) -> Any:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
"""simple docstring"""
model = TFBlenderbotSmallModel(config=config ).get_decoder()
input_ids = inputs_dict["""input_ids"""]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["""attention_mask"""][:1, :]
head_mask = inputs_dict["""head_mask"""]
self.batch_size = 1
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append the new tokens to input_ids and attention_mask
next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def __lowerCamelCase ( __a :Dict , __a :Tuple , __a :List[Any] , __a :List[str]=None , __a :List[Any]=None , __a :Optional[Any]=None , __a :List[str]=None , __a :int=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
pipeline_model_mapping = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def setUp( self ):
"""simple docstring"""
self.model_tester = TFBlenderbotSmallModelTester(self )
self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Any:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase )
@require_tokenizers
@require_tf
class A (unittest.TestCase ):
'''simple docstring'''
src_text = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
model_name = '''facebook/blenderbot_small-90M'''
@cached_property
def tokenizer( self ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def model( self ):
"""simple docstring"""
A__ = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def a_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
model_inputs = self.tokenizer(self.src_text , return_tensors="""tf""" )
generated_ids = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 276 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase__ = TypeVar("T")
UpperCAmelCase__ = Union[List[T], Tuple[T, ...]]
UpperCAmelCase__ = Union[T, List[T], Dict[str, T]]
UpperCAmelCase__ = Union[str, bytes, os.PathLike]
| 339 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
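# _import_structure maps each submodule to the public names it exposes; _LazyModule resolves them on first attribute access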
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase (PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = AltDiffusionPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
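# assemble a deliberately tiny UNet/VAE/CLIP text-encoder stack so the fast pipeline tests can run on CPU in seconds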
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
tokenizer.model_max_length = 77
SCREAMING_SNAKE_CASE_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : str ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __A ( self : Optional[Any] ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE_ = RobertaSeriesModelWithTransformation(__magic_name__ )
SCREAMING_SNAKE_CASE_ = text_encoder
SCREAMING_SNAKE_CASE_ = AltDiffusionPipeline(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "A photo of an astronaut"
SCREAMING_SNAKE_CASE_ = alt_pipe(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=__magic_name__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE_ = RobertaSeriesModelWithTransformation(__magic_name__ )
SCREAMING_SNAKE_CASE_ = text_encoder
SCREAMING_SNAKE_CASE_ = AltDiffusionPipeline(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = alt_pipe(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ) -> List[Any]:
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=__magic_name__ )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
SCREAMING_SNAKE_CASE_ = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=__magic_name__ , safety_checker=__magic_name__ )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe([prompt] , generator=__magic_name__ , num_inference_steps=2 , output_type="numpy" )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 305 | import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 305 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
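# ANSI escape "\033[<n><letter>" moves the terminal cursor <n> cells in the mapped direction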
class Direction(enum.Enum):
UP = 0
DOWN = 1
def __lowerCAmelCase ( a__ , a__="" ) -> Optional[int]:
sys.stdout.write(str(a__ ) + end )
sys.stdout.flush()
def __lowerCAmelCase ( a__ , a__ , a__="" ) -> Dict:
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , a__ )
def __lowerCAmelCase ( ) -> Any:
forceWrite('''\r''' )
def __lowerCAmelCase ( a__ , a__ ) -> Any:
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def __lowerCAmelCase ( ) -> int:
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def __lowerCAmelCase ( ) -> Dict:
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH ) | 6 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def prepare_config_and_inputs(self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config(self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = TFViTModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase__ = self.image_size // 2
UpperCamelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = TFViTForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase__ = self.image_size // 2
UpperCamelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = TFViTForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __A( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
def setUp(self ):
self.model_tester = TFViTModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Layer ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ (self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""tf""" )
# forward pass
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
| 244 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(a )
lowerCAmelCase__ : List[str] = AutoModelForSeq2SeqLM.from_pretrained(a )
lowerCAmelCase__ : Optional[Any] = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase__ : Any = model.to_bettertransformer()
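# after conversion, at least one submodule should be wrapped by a BetterTransformer class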
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase__ : str = model.generate(**a )
lowerCAmelCase__ : Tuple = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : int = AutoModelForSeq2SeqLM.from_pretrained(a )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase__ : List[str] = model_reloaded.generate(**a )
self.assertTrue(torch.allclose(a , a ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : int = AutoModelForSeq2SeqLM.from_pretrained(a )
lowerCAmelCase__ : int = model.to_bettertransformer()
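# a converted model cannot be saved directly; it must be reversed back to the original layout first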
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError ):
model.save_pretrained(a )
lowerCAmelCase__ : str = model.reverse_bettertransformer()
model.save_pretrained(a ) | 362 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
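# round-trip the config through save_pretrained/from_pretrained, optionally under a custom config file name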
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(tmp_dir , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(tmp_dir )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(tmp_dir )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir , repo_id='test-generation-config' , push_to_hub=True , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir , repo_id='valid_org/test-generation-config-org' , push_to_hub=True , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) ) | 307 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = MobileBertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
normalizer_state['''lowercase'''] = do_lower_case
normalizer_state['''strip_accents'''] = strip_accents
normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )->List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None )->Tuple[str]:
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
| 260 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig):
"""simple docstring"""
model_type = """poolformer"""
def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.02 , **kwargs , ):
self.num_channels = num_channels
self.patch_size = patch_size
self.stride = stride
self.padding = padding
self.pool_size = pool_size
self.hidden_sizes = hidden_sizes
self.mlp_ratio = mlp_ratio
self.depths = depths
self.patch_sizes = patch_sizes
self.strides = strides
self.num_encoder_blocks = num_encoder_blocks
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_layer_scale = use_layer_scale
self.layer_scale_init_value = layer_scale_init_value
self.initializer_range = initializer_range
super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("""1.11""")
@property
def inputs( self )->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def atol_for_validation( self )->float:
return 2e-3
| 260 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
'''simple docstring'''
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class Wav2Vec2FeatureExtractionTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def A_ ( self : Dict ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A_ ( self : Any , lowercase_ : Tuple=False , lowercase_ : Dict=False ):
def _flatten(lowercase_ : Dict ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
snake_case_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
class Wav2Vec2FeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
feature_extraction_class = Wav2Vec2FeatureExtractor
def setUp( self ):
self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self )
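# helper: normalized features should have (approximately) zero mean and unit variance along the time axis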
def _check_zero_mean_unit_variance( self , input_vector ):
self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def A_ ( self : Optional[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test not batched input
snake_case_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test batched
snake_case_ = feat_extract(lowercase_ , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(lowercase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case_ = np.asarray(lowercase_ )
snake_case_ = feat_extract(lowercase_ , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(lowercase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def A_ ( self : Tuple ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1600, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
snake_case_ = feat_extract(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def A_ ( self : Dict ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = range(800 , 1400 , 200 )
snake_case_ = [floats_list((1, x) )[0] for x in lengths]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1600, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
snake_case_ = feat_extract(lowercase_ , max_length=lowercase_ , padding=lowercase_ )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def A_ ( self : List[str] ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A_ ( self : Dict ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def A_ ( self : List[Any] ):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def A_ ( self : str ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
config = Wav2Vec2Config.from_pretrained(model_id )
feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 72 |
'''simple docstring'''
MOD_ADLER = 65521
def adler32( plain_text: str ) -> int:
'''Compute the Adler-32 checksum of plain_text.'''
a = 1
b = 0
for plain_chr in plain_text:
a = (a + ord(plain_chr )) % MOD_ADLER
b = (b + a) % MOD_ADLER
return (b << 16) | a
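# Reference value for a quick sanity check (illustrative): adler32("Wikipedia") == 300286872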
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
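# lazy import structure: each try/except section below registers its symbols only when the optional backend is installed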
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""CLIPFeatureExtractor"""]
lowerCamelCase__ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_clip"""] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_clip"""] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_clip"""] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
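
    # Worked example (illustrative inputs, not part of the original module):
    # three Jacobi sweeps on a strictly diagonally dominant 3x3 system.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))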
| 128 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the passed module was compiled with ``torch.compile()``."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (and its compiled wrapper, if any)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save ``obj`` to disk, routing through ``xm.save`` on TPU and through rank 0 elsewhere."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased) inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
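

# Usage sketch (illustrative variable name and port, not part of this module):
#   with patch_environment(master_port=29501):
#       assert os.environ["MASTER_PORT"] == "29501"
#   # the variable is removed again once the block exits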
def get_pretty_name(obj):
    """Return a readable name for ``obj``: its qualified name, plain name, or ``str()`` fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge ``source`` into ``destination``; nested dicts are merged rather than replaced."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
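

# Example of the deep-merge semantics (illustrative values):
#   merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#   -> {"a": {"c": 2, "b": 1}, "d": 3}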
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is already in use on localhost (defaults to 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 356 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images, and can
    post-process semantic-segmentation logits."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 222 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase (metaclass=UpperCamelCase_ ):
lowerCamelCase__ : Any = ['onnx']
def __init__( self : str , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["""onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) -> Any:
requires_backends(cls , ["""onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ) -> List[Any]:
requires_backends(cls , ["""onnx"""] )
| 165 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8,
        dataset="wiki_dpr", dataset_split="train", index_name="compressed",
        index_path=None, passages_path=None, use_dummy_dataset=False,
        reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False,
        use_cache=True, forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
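

# Usage sketch (model identifiers are illustrative, not prescribed by this file):
#   from transformers import AutoConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#   )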
| 103 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
_UpperCAmelCase : Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
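
# Example invocation (paths are hypothetical, for illustration only):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned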
| 45 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
_UpperCAmelCase : List[str] = Process("""P1""", 0, 53)
_UpperCAmelCase : Union[str, Any] = Process("""P2""", 0, 17)
_UpperCAmelCase : int = Process("""P3""", 0, 68)
_UpperCAmelCase : str = Process("""P4""", 0, 24)
_UpperCAmelCase : Optional[int] = 3
_UpperCAmelCase : Optional[Any] = [17, 25]
_UpperCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
_UpperCAmelCase : Tuple = Process("""P1""", 0, 53)
_UpperCAmelCase : Any = Process("""P2""", 0, 17)
_UpperCAmelCase : Any = Process("""P3""", 0, 68)
_UpperCAmelCase : List[Any] = Process("""P4""", 0, 24)
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Optional[int] = [17, 25]
_UpperCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa])
_UpperCAmelCase : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_UpperCAmelCase : Dict = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 45 | 1 |
from __future__ import annotations

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers for each length below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
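
    # Expected value (assumption: this is Project Euler problem 145, whose
    # published answer for reversible numbers below 10**9 is 608720), i.e.
    # the line above should print `solution() = 608720`.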
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 1 |
"""simple docstring"""
UpperCAmelCase: Optional[Any] = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 336 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 336 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        spec_out = self.decoder_norm(y)
        spec_out = self.post_dropout(spec_out)
        spec_out = self.spec_out(spec_out)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads,
                dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU: one projection goes through the activation, the other stays linear
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
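
# Minimal usage sketch (added for illustration; shapes are assumptions): FiLM applies the
# feature-wise affine transform x * (1 + scale) + shift, with scale and shift predicted
# from a conditioning embedding of size in_features = d_model * 4.
#
#   film = TaFiLMLayer(in_features=32, out_features=8)
#   x = torch.ones(1, 4, 8)
#   conditioning = torch.randn(1, 1, 32)
#   film(x, conditioning).shape  # torch.Size([1, 4, 8])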
| 73 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    # define the collection unconditionally so it is available below even when debug == 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
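    # Worked example (added for illustration): for item 45 the first probe is
    # 0 + (45 - 10) * (7 - 0) // (93 - 10) = 2; since collection[2] = 40 < 45 the window
    # narrows to [3, 7] and the next probe lands exactly on index 3.
    assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45) == 3
    assert interpolation_search_by_recursion([10, 30, 40, 45, 50, 66, 77, 93], 45, 0, 7) == 3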
| 73 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase (SCREAMING_SNAKE_CASE_ : list[int] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
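    # Usage sketch (added for illustration; the obfuscated helper name `lowercase` is
    # kept as-is): the check relies on set() collapsing duplicate values.
    assert lowercase([1, 2, 3]) is True
    assert lowercase([1, 2, 2]) is False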
| 362 |
"""simple docstring"""
from __future__ import annotations
def lowercase (SCREAMING_SNAKE_CASE_ : list[int] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list) -> None:
    """simple docstring"""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir: str) -> str:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class __snake_case ( a ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCamelCase ( self : List[str] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES)
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES)
UpperCAmelCase_ = 4
UpperCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ , UpperCAmelCase_ = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=_snake_case , max_target_length=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , )
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_snake_case , _snake_case)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def lowerCamelCase ( self : Optional[int] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES)
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES)
UpperCAmelCase_ = 4
UpperCAmelCase_ = LegacySeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=20 , max_target_length=_snake_case , )
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
UpperCAmelCase_ = tmp_dir.joinpath('''train.source''').open().readlines()
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_snake_case , _snake_case , 128 , _snake_case)
UpperCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case) < len(_snake_case)
assert len(_snake_case) == 1
assert len(packed_examples[0]) == sum(len(_snake_case) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=64)
UpperCAmelCase_ = 64
UpperCAmelCase_ = ds.make_dynamic_sampler(_snake_case , required_batch_size_multiple=_snake_case)
UpperCAmelCase_ = [len(_snake_case) for x in batch_sampler]
assert len(set(_snake_case)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_snake_case) == len(_snake_case) # no dropped or added examples
UpperCAmelCase_ = DataLoader(_snake_case , batch_sampler=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for batch in data_loader:
UpperCAmelCase_ = batch['''input_ids'''].shape
UpperCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(_snake_case)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_snake_case)
assert num_src_per_batch[0] == max(_snake_case)
if failures:
raise AssertionError(F"""too many tokens in {len(_snake_case)} batches""")
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=512)
UpperCAmelCase_ = 2
UpperCAmelCase_ = ds.make_sortish_sampler(_snake_case , shuffle=_snake_case)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=_snake_case)
UpperCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_snake_case : List[Any] , _snake_case : Tuple="input_ids"):
return [batch[k].eq(_snake_case).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_snake_case , k='''labels''')) < sum(count_pad_tokens(_snake_case , k='''labels'''))
assert sum(count_pad_tokens(_snake_case)) < sum(count_pad_tokens(_snake_case))
assert len(_snake_case) == len(_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict=1000 , _snake_case : str=128):
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , _snake_case):
UpperCAmelCase_ = '''examples/seq2seq/wmt_en_ro'''
UpperCAmelCase_ = max_len * 2 * 64
if not Path(_snake_case).joinpath('''train.len''').exists():
save_len_file(_snake_case , _snake_case)
else:
UpperCAmelCase_ = '''examples/seq2seq/test_data/wmt_en_ro'''
UpperCAmelCase_ = max_len * 4
save_len_file(_snake_case , _snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=_snake_case , max_target_length=_snake_case , n_obs=_snake_case , )
return ds, max_tokens, tokenizer
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset()
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=_snake_case))
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=_snake_case))
assert idsa.intersection(_snake_case) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase ( self : List[str] , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case)
if tok_name == MBART_TINY:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_snake_case) == 1 if tok_name == BART_TINY else len(_snake_case) == 0
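# Illustrative note (added): for mBART the labels are laid out as [tokens..., eos, lang_code]
# and shift_tokens_right rotates the language code to position 0, giving decoder inputs of
# [lang_code, tokens..., eos] -- exactly what the decoder_input_ids assertions above verify.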
| 51 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = MgpstrTokenizer
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Any = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = """tester"""
UpperCAmelCase__ = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ) , 0 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
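
# Illustrative note (added): MGP-STR tokenizes character by character against the vocab
# written in setUp, so "tester" maps to ["t", "e", "s", "t", "e", "r"] with ids
# [31, 16, 30, 31, 16, 29] given [GO]=0, [s]=1, digits at 2-11 and letters a-z from 12.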
| 346 | 0 |
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
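    # Non-interactive example (added for illustration): guess_the_number(1, 100, 37)
    # probes 50 (high), then 25 (low), then 37 (same) before stopping.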
| 286 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2,
        adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True,
        languages=("en_XX",), default_language=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
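
# Usage sketch (added for illustration; relies on the public transformers API):
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   model = XmodModel(config)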
| 286 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , lowerCamelCase__ )
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , lowerCamelCase__ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , lowerCamelCase__ )
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , lowerCamelCase__ )
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
HfFolder.save_token(lowerCamelCase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def __lowerCamelCase ( ):
"""simple docstring"""
return HfApi(endpoint=lowerCamelCase__ )
@pytest.fixture(scope="session" )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = HfFolder.get_token()
HfFolder.save_token(lowerCamelCase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCamelCase__ )
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def _cleanup_repo(lowerCamelCase__ ):
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
@contextmanager
def _temporary_repo(lowerCamelCase__ ):
try:
yield repo_id
finally:
cleanup_repo(lowerCamelCase__ )
return _temporary_repo
@pytest.fixture(scope="session" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
lowercase__ : Union[str, Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data/text_data.txt" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
lowercase__ : Tuple = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data.zip" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
lowercase__ : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data.zip" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
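
# Usage sketch (added for illustration; hypothetical test function): pytest injects
# fixtures by parameter name, so a test can depend on the session-scoped repo id directly:
#
#   def test_private_repo_exists(hf_private_dataset_repo_txt_data_):
#       assert hf_private_dataset_repo_txt_data_.startswith(f"{CI_HUB_USER}/repo_txt_data-")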
| 130 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(10 ).repartition(2 )
lowercase__ : Any = [1, 0]
lowercase__ : Optional[int] = _generate_iterable_examples(lowerCamelCase__ , lowerCamelCase__ ) # Reverse the partitions.
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , lowerCamelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__ , lowercase__ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(10 ).repartition(1 )
lowercase__ : Optional[int] = SparkExamplesIterable(lowerCamelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : int = lambda lowerCamelCase__ : x.reverse()
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [2, 1, 0] )
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shuffle_data_sources(lowerCamelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[Any] = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
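
# Standalone sketch (added for illustration) of the primitive these tests rely on:
# repartition(n) fixes how many shards downstream iteration will see.
#
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
#   df = spark.range(10).repartition(2)
#   assert df.rdd.getNumPartitions() == 2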
| 130 | 1 |
from math import factorial
def solution(num: int = 1_00) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
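    # Worked example (added for illustration): 10! = 3628800 and 3+6+2+8+8+0+0 = 27,
    # so solution(10) == 27.
    assert solution(10) == 27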
| 361 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
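    # Worked check (added for illustration) of the identity behind solution(): for odd n,
    # ((p_n - 1)**n + (p_n + 1)**n) % p_n**2 == (2 * n * p_n) % p_n**2. With n = 3, p_3 = 5:
    # (4**3 + 6**3) % 25 == 280 % 25 == 5 == (2 * 3 * 5) % 25.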
| 118 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """simple docstring"""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex

    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
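    # Example (added for illustration): for the triangle graph
    #   {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
    # prisms_algorithm returns the two unit-weight edges [(0, 1), (1, 2)].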
| 28 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
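
# Usage sketch (added for illustration; assumes the transformers agents/tools API):
#
#   from transformers import load_tool
#
#   summarizer = load_tool("summarization")
#   print(summarizer("A long English passage to compress ..."))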
| 120 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
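
# Usage sketch (added for illustration; relies on the public transformers API):
#
#   from transformers import FocalNetConfig, FocalNetModel
#
#   config = FocalNetConfig()  # defaults correspond to a tiny-sized FocalNet
#   model = FocalNetModel(config)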
| 180 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
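    # Worked example (added for illustration): 12 = 4 + 4 + 4, so three squares suffice
    # and minimum_squares_to_represent_a_number(12) == 3.
    assert minimum_squares_to_represent_a_number(12) == 3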
| 180 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class a__ ( nn.Module ):
def __init__( self : str,_A : nn.Module,_A : int ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : str = module
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Sequential(
nn.Linear(module.in_features,_A,bias=_A ),nn.Linear(_A,module.out_features,bias=_A ),)
SCREAMING_SNAKE_CASE_ : str = (2.0 / (5 * min(module.in_features,module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight,std=_A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCamelCase ( self : Optional[Any],_A : Dict,*_A : int,**_A : Dict ):
"""simple docstring"""
return self.module(_A,*_A,**_A ) + self.adapter(_A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
A = 'bigscience/bloom-1b7'
# Constant values
A = 2.109_6595_5269_2574
A = 'Hello my name is'
A = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
A = 10
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class a__ ( A__ ):
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto" )
SCREAMING_SNAKE_CASE_ : Dict = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_abit.config
self.assertTrue(hasattr(_A,"quantization_config" ) )
SCREAMING_SNAKE_CASE_ : Any = config.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.to_diff_dict()
SCREAMING_SNAKE_CASE_ : int = config.to_json_string()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE_ : str = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit,self.EXPECTED_RELATIVE_DIFFERENCE )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_A,torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.input_text,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Any = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name,quantization_config=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(self.input_text,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(_A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = BitsAndBytesConfig()
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name,quantization_config=_A,load_in_abit=_A,device_map="auto",bnb_abit_quant_type="nf4",)
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaises(_A ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(_A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(self.input_text,return_tensors="pt" )
        SCREAMING_SNAKE_CASE_ : int = self.model_fpaa.to(torch.float32 )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : str = self.model_fpaa.to("cpu" )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : int = self.model_fpaa.float()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = AutoModelForSeqaSeqLM.from_pretrained("t5-small",load_in_abit=_A,device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "t5-small"
SCREAMING_SNAKE_CASE_ : Optional[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(cls.model_name )
SCREAMING_SNAKE_CASE_ : Optional[Any] = "Translate in German: Hello, my dog is cute"
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE_ : Dict = None
# test with `t5-small`
SCREAMING_SNAKE_CASE_ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(**_A )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Tuple = model.generate(**_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = modules
def __UpperCamelCase ( self : str ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q,bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(**_A )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : List[str] = model.generate(**_A )
class a__ ( A__ ):
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# model_name
SCREAMING_SNAKE_CASE_ : str = "bigscience/bloom-560m"
SCREAMING_SNAKE_CASE_ : Optional[int] = "t5-small"
# Different types of model
SCREAMING_SNAKE_CASE_ : List[str] = AutoModel.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
# Sequence classification model
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
self.model_name,load_in_abit=_A,device_map="auto" )
# CausalLM model
SCREAMING_SNAKE_CASE_ : int = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
# Seq2seq model
SCREAMING_SNAKE_CASE_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name,load_in_abit=_A,device_map="auto" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class a__ ( A__ ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = pipeline(
"text-generation",model=self.model_name,model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},max_new_tokens=self.MAX_NEW_TOKENS,)
# Real second forward pass
SCREAMING_SNAKE_CASE_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"],self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class a__ ( A__ ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name,load_in_abit=_A,device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ),{0, 1} )
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(self.input_text,return_tensors="pt" )
# Second real batch
SCREAMING_SNAKE_CASE_ : Optional[int] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
class a__ ( A__ ):
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "facebook/opt-350m"
super().setUp()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=_A )
self.assertEqual(set(model.hf_device_map.values() ),{torch.cuda.current_device()} )
for param in model.parameters():
SCREAMING_SNAKE_CASE_ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE_ : List[Any] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_A ) ):
SCREAMING_SNAKE_CASE_ : Dict = LoRALayer(module.q_proj,rank=16 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LoRALayer(module.k_proj,rank=16 )
SCREAMING_SNAKE_CASE_ : List[Any] = LoRALayer(module.v_proj,rank=16 )
# Step 3: dummy batch
SCREAMING_SNAKE_CASE_ : int = self.tokenizer("Test batch ",return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE_ : int = model.forward(**_A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_A,_A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_A,nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class a__ ( A__ ):
A = 'gpt2-xl'
A = 3.3191_8548_5415_2187
| 18 |
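# `LoRALayer`, used in the training test above, is defined elsewhere in the original
# test file. Below is a minimal sketch consistent with how it is called there
# (wrapping a frozen nn.Linear and adding a trainable low-rank adapter); the
# zero/normal initialization choices are assumptions, not taken from the snippet.
import torch
import torch.nn as nn


class LoRALayer(nn.Module):
    """Wraps a frozen linear module and adds a trainable low-rank update."""

    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module  # the frozen pretrained projection
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),   # down-projection
            nn.Linear(rank, module.out_features, bias=False),  # up-projection
        )
        nn.init.normal_(self.adapter[0].weight, std=1.0 / rank)
        nn.init.zeros_(self.adapter[1].weight)  # adapter starts as a no-op

    def forward(self, x):
        return self.module(x) + self.adapter(x)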
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : str , a : Optional[Any] , a : int=13 , a : str=7 , a : str=True , a : List[str]=True , a : Optional[Any]=True , a : int=True , a : List[Any]=99 , a : List[Any]=32 , a : Tuple=5 , a : Any=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Any=0.1 , a : int=0.1 , a : List[Any]=128 , a : Union[str, Any]=32 , a : Union[str, Any]=16 , a : Dict=2 , a : List[Any]=0.0_2 , a : Optional[Any]=3 , a : List[Any]=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : Optional[Any] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Optional[Any] = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Any = type_vocab_size
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Dict = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : Union[str, Any] = scope
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Tuple = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self : Optional[Any] , a : Optional[int] , a : Tuple , a : Optional[int] , a : List[Any] , a : Tuple , a : List[str] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = NezhaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , token_type_ids=a )
lowerCAmelCase__ : List[str] = model(a , token_type_ids=a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : List[Any] , a : Union[str, Any] , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Tuple , a : List[Any] , a : Tuple , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[int] = NezhaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Dict = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
lowerCAmelCase__ : List[str] = model(a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : Tuple , a : Optional[Any] , a : List[Any] , a : str , a : List[str] , a : Tuple , a : List[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = NezhaForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : List[Any] , a : int , a : List[str] , a : Union[str, Any] , a : int , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = NezhaForNextSentencePrediction(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : int , a : Optional[int] , a : str , a : List[str] , a : int , a : Dict , a : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = NezhaForPreTraining(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : List[str] , a : Any , a : Any , a : Union[str, Any] , a : Tuple , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = NezhaForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : str , a : Union[str, Any] , a : Tuple , a : Optional[Any] , a : Dict , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : Optional[Any] = NezhaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : List[str] , a : Dict , a : str , a : Optional[Any] , a : Optional[int] , a : List[str] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : str = NezhaForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : int , a : Tuple , a : List[Any] , a : Tuple , a : List[Any] , a : Optional[int] , a : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Any = NezhaForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) : Any = config_and_inputs
lowerCAmelCase__ : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _lowerCamelCase ( self : str , a : Tuple , a : int , a : Dict=False ):
'''simple docstring'''
lowerCAmelCase__ : int = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class in get_values(a ):
lowerCAmelCase__ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = NezhaModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase__ : str = None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , a , a , a , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[Any] = NezhaModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Any = model_class(config=a )
lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(a , a )
lowerCAmelCase__ : int = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'bert.pt' ) )
lowerCAmelCase__ : Any = torch.jit.load(os.path.join(a , 'bert.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
lowerCAmelCase__ : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a )[0]
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : Optional[int] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a )[0]
lowerCAmelCase__ : int = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1E-4 ) ) | 212 | 0 |
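# The torchscript test above traces a model, saves it, reloads it, and runs the
# loaded copy. The same cycle on a tiny module, independent of any checkpoint:
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2).eval()
example = torch.randn(1, 4)
traced = torch.jit.trace(model, example)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "linear.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)
    assert torch.allclose(traced(example), loaded(example))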
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 267 |
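# The __init__ above defers heavy imports until a symbol is first touched. A
# stripped-down illustration of the same idea without transformers internals;
# `math` stands in for an expensive submodule here.
import importlib
import types


class LazyModule(types.ModuleType):
    """Import the wrapped module only when an attribute is first accessed."""

    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target
        self._module = None

    def __getattr__(self, item):
        # only reached when normal attribute lookup fails
        if self._module is None:
            self._module = importlib.import_module(self._target)
        return getattr(self._module, item)


lazy_math = LazyModule("lazy_math", "math")
print(lazy_math.sqrt(4.0))  # the real import of `math` happens here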
# Function to print upper half of diamond (pyramid)
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Dict:
"""simple docstring"""
for i in range(0 , SCREAMING_SNAKE_CASE ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Dict:
"""simple docstring"""
for i in range(SCREAMING_SNAKE_CASE , 0 , -1 ):
for _ in range(SCREAMING_SNAKE_CASE , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Any:
"""simple docstring"""
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(SCREAMING_SNAKE_CASE ) # upper half
reverse_floyd(SCREAMING_SNAKE_CASE ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
UpperCAmelCase = 1
while K:
UpperCAmelCase = int(input("""enter the number and see the magic : """))
print()
pretty_print(user_number)
UpperCAmelCase = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""") | 267 | 1 |
'''simple docstring'''
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase__ = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCamelCase( UpperCAmelCase_ ):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def UpperCamelCase( UpperCAmelCase_ ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def UpperCamelCase( UpperCAmelCase_ ):
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase : Optional[int] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
UpperCAmelCase : Optional[int] = 0
# Doctest custom flag to ignore output.
lowercase__ = doctest.register_optionflag("IGNORE_RESULT")
lowercase__ = doctest.OutputChecker
class A_ ( __lowerCAmelCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Any ) -> Optional[int]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ )
lowercase__ = CustomOutputChecker
lowercase__ = HfDoctestModule
lowercase__ = HfDocTestParser
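# The conftest above registers a custom IGNORE_RESULT doctest flag and swaps in an
# OutputChecker that honors it. The mechanism in isolation, runnable without pytest:
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")


class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept any output when the flag is set on an example
        return super().check_output(want, got, optionflags)


parser = doctest.DocTestParser()
test = parser.get_doctest(">>> 1 + 1  # doctest: +IGNORE_RESULT\n3\n", {}, "demo", None, 0)
runner = doctest.DocTestRunner(checker=IgnoreResultChecker())
runner.run(test)
print(runner.summarize())  # 0 failures despite the wrong expected output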
| 151 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 0 |
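# Both config classes in this row follow one recipe: subclass PretrainedConfig, set
# a `model_type`, store every hyperparameter as an attribute, and pass the rest to
# super(). A toy version of that recipe (class name and fields are illustrative):
from transformers import PretrainedConfig


class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size: int = 64, num_layers: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers


cfg = ToyConfig(num_layers=4)
print(cfg.to_json_string())  # serializes like any built-in config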
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def UpperCAmelCase_ ( __lowerCamelCase : List[Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str] ,):
lowercase_ :Union[str, Any] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
lowercase_ , lowercase_ :Union[str, Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ :Optional[Any] = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowerCamelCase )
assert base_extractor.is_extractable(__lowerCamelCase )
lowercase_ :Dict = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__lowerCamelCase ,__lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ :Optional[int] = file_path.read_text(encoding="utf-8" )
else:
lowercase_ :Union[str, Any] = output_path.read_text(encoding="utf-8" )
lowercase_ :Union[str, Any] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Any ,__lowerCamelCase : Union[str, Any] ,):
lowercase_ :str = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
lowercase_ :List[str] = input_paths[compression_format]
if input_path is None:
lowercase_ :Optional[Any] = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowerCamelCase )
lowercase_ :Tuple = Extractor.infer_extractor_format(__lowerCamelCase )
assert extractor_format is not None
lowercase_ :List[str] = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ :Optional[Any] = file_path.read_text(encoding="utf-8" )
else:
lowercase_ :List[str] = output_path.read_text(encoding="utf-8" )
lowercase_ :Tuple = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : int ):
import tarfile
lowercase_ :Tuple = tmp_path / "data_dot_dot"
directory.mkdir()
lowercase_ :Union[str, Any] = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(__lowerCamelCase ,"w" ) as f:
f.add(__lowerCamelCase ,arcname=os.path.join(".." ,text_file.name ) )
return path
@pytest.fixture
def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ):
import tarfile
lowercase_ :Tuple = tmp_path / "data_sym_link"
directory.mkdir()
lowercase_ :Tuple = directory / "tar_file_with_sym_link.tar"
os.symlink(".." ,directory / "subdir" ,target_is_directory=__lowerCamelCase )
with tarfile.TarFile(__lowerCamelCase ,"w" ) as f:
f.add(str(directory / "subdir" ) ,arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" ,[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] ,)
def UpperCAmelCase_ ( __lowerCamelCase : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str] ):
lowercase_ :Tuple = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
lowercase_ :Dict = insecure_tar_files[insecure_tar_file]
lowercase_ :int = tmp_path / "extracted"
TarExtractor.extract(__lowerCamelCase ,__lowerCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def UpperCAmelCase_ ( __lowerCamelCase : int ):
# We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowercase_ :Any = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
lowercase_ :Any = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(__lowerCamelCase )
assert zipfile.is_zipfile(str(__lowerCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__lowerCamelCase ) # but we're right
| 147 |
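# The last test in this snippet demonstrates why the extractors check magic bytes
# instead of trusting zipfile.is_zipfile, which also matches an end-of-central-
# directory record embedded anywhere in a file. One possible magic-number check:
def looks_like_zip(path: str) -> bool:
    # local file header, empty archive, and spanned archive signatures
    with open(path, "rb") as f:
        return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")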
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( _lowerCAmelCase ):
__A = ["image_processor", "tokenizer"]
__A = "LayoutLMv3ImageProcessor"
__A = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : int , lowercase : Optional[Any]=None , lowercase : List[str]=None , **lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase , )
lowercase_ :Optional[int] = kwargs.pop("feature_extractor" )
lowercase_ :Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase , lowercase )
def __call__( self : Optional[Any] , lowercase : List[str] , lowercase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase : Union[List[List[int]], List[List[List[int]]]] = None , lowercase : Optional[Union[List[int], List[List[int]]]] = None , lowercase : bool = True , lowercase : Union[bool, str, PaddingStrategy] = False , lowercase : Union[bool, str, TruncationStrategy] = None , lowercase : Optional[int] = None , lowercase : int = 0 , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = True , lowercase : Optional[Union[str, TensorType]] = None , **lowercase : List[Any] , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowercase_ :Dict = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
lowercase_ :str = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase_ :Union[str, Any] = features["words"]
lowercase_ :Optional[Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
lowercase_ :Any = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowercase_ :Any = self.get_overflowing_images(lowercase , encoded_inputs["overflow_to_sample_mapping"] )
lowercase_ :Any = images
return encoded_inputs
def lowercase__ ( self : List[Any] , lowercase : Any , lowercase : Optional[Any] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def lowercase__ ( self : Union[str, Any] , *lowercase : List[Any] , **lowercase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def lowercase__ ( self : List[Any] , *lowercase : Any , **lowercase : str ):
"""simple docstring"""
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowercase__ ( self : str ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase , )
return self.image_processor
| 147 | 1 |
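# A sketch of how the processor above is typically driven end to end; the checkpoint
# name and image path are assumptions, and OCR mode additionally needs pytesseract.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
# with apply_ocr=True (the image processor's default) words and boxes come from
# OCR, so they must not be passed in explicitly
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, bbox, input_ids, pixel_values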
"""simple docstring"""
from itertools import product
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = sides_number
snake_case_ = max_face_number * dice_number
snake_case_ = [0] * (max_total + 1)
snake_case_ = 1
snake_case_ = range(_SCREAMING_SNAKE_CASE , max_face_number + 1 )
for dice_numbers in product(_SCREAMING_SNAKE_CASE , repeat=_SCREAMING_SNAKE_CASE ):
snake_case_ = sum(_SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
return totals_frequencies
def _a ( ) -> float:
snake_case_ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
snake_case_ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
snake_case_ = 0
snake_case_ = 9
snake_case_ = 4 * 9
snake_case_ = 6
for peter_total in range(_SCREAMING_SNAKE_CASE , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case_ = (4**9) * (6**6)
snake_case_ = peter_wins_count / total_games_number
snake_case_ = round(_SCREAMING_SNAKE_CASE , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
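# Two quick sanity checks for `total_frequency_distribution` above: the counts must
# sum to sides**dice, and the distribution is symmetric about its midpoint.
from itertools import product


def frequencies(sides: int, dice: int) -> list[int]:
    counts = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        counts[sum(roll)] += 1  # brute-force enumeration of all outcomes
    return counts


f = frequencies(4, 9)
assert sum(f) == 4**9
assert all(f[k] == f[9 + 36 - k] for k in range(9, 37))  # symmetric around 22.5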
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """upernet"""
def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 347 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
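# The try/except OptionalDependencyNotAvailable dance above is how diffusers
# degrades gracefully when an extra is missing. The same idea in plain Python,
# with `rich` standing in for an optional dependency:
import importlib.util

if importlib.util.find_spec("rich") is not None:
    from rich import print as pretty_print
else:
    pretty_print = print  # fall back instead of raising at import time

pretty_print("hello")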
| 50 | from __future__ import annotations
import numpy as np
def __lowercase ( lowerCamelCase : list[float] ):
return np.maximum(0 , lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 50 | 1 |
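# Alongside `relu` above, the elementwise derivative used during backpropagation;
# choosing subgradient 0 at exactly zero is a convention, not the only option.
import numpy as np


def relu_derivative(vector) -> np.ndarray:
    return (np.asarray(vector) > 0).astype(float)


print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]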
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Any = """xglm"""
A__ : int = ["""past_key_values"""]
A__ : Any = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __UpperCamelCase=2_5_6_0_0_8 , __UpperCamelCase=2_0_4_8 , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=4_0_9_6 , __UpperCamelCase=2_4 , __UpperCamelCase=1_6 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = vocab_size
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = d_model
UpperCamelCase_ = ffn_dim
UpperCamelCase_ = num_layers
UpperCamelCase_ = attention_heads
UpperCamelCase_ = activation_function
UpperCamelCase_ = dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = layerdrop
UpperCamelCase_ = init_std
UpperCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase_ = use_cache
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 122 |
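# The `attribute_map` in the config above lets model-agnostic code read canonical
# names even though the stored fields differ. Assuming the class behaves like the
# released XGLMConfig, the aliasing can be observed directly:
from transformers import XGLMConfig

cfg = XGLMConfig(d_model=512, num_layers=4, attention_heads=8)
assert cfg.hidden_size == 512      # alias for d_model
assert cfg.num_hidden_layers == 4  # alias for num_layers
assert cfg.num_attention_heads == 8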
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=3 , __UpperCamelCase=3_2 , __UpperCamelCase=3 , __UpperCamelCase=1_0 , __UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCamelCase=[1, 1, 2, 1] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=3 , __UpperCamelCase=None , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = embeddings_size
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = depths
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_act
UpperCamelCase_ = num_labels
UpperCamelCase_ = scope
UpperCamelCase_ = len(__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = FlaxRegNetModel(config=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = FlaxRegNetForImageClassification(config=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A__ : Any = False
A__ : List[Any] = False
A__ : Dict = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRegNetModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ):
"""simple docstring"""
return
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = model_class(__UpperCamelCase )
@jax.jit
def model_jitted(__UpperCamelCase , **__UpperCamelCase ):
return model(pixel_values=__UpperCamelCase , **__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ) -> Tuple:
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class lowercase_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""np""" )
UpperCamelCase_ = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase_ = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 122 | 1 |
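# The JIT test above asserts that jitted and eager outputs agree. The same check on
# a pure function, with no model or checkpoint involved:
import jax
import jax.numpy as jnp


def f(x):
    return jnp.tanh(x) * 2.0


x = jnp.linspace(-1.0, 1.0, 8)
jitted = jax.jit(f)(x)
with jax.disable_jit():
    eager = f(x)
assert jnp.allclose(jitted, eager)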
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "vit_msn"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-0_6 , _a=224 , _a=16 , _a=3 , _a=True , **_a , ):
"""simple docstring"""
super().__init__(**_a )
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = initializer_range
lowerCamelCase = layer_norm_eps
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = qkv_bias
| 168 |
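# Configs like the ViT-MSN one above serialize losslessly to JSON; a save/load
# round trip, assuming a transformers install recent enough to ship ViTMSNConfig:
import tempfile

from transformers import ViTMSNConfig

cfg = ViTMSNConfig(hidden_size=384, num_hidden_layers=6)
with tempfile.TemporaryDirectory() as tmp:
    cfg.save_pretrained(tmp)
    reloaded = ViTMSNConfig.from_pretrained(tmp)
assert reloaded.hidden_size == 384 and reloaded.num_hidden_layers == 6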
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class BlipaVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00_001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
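# Minimal usage sketch (illustrative only, not part of the original module):
# composing a BLIP-2 config from its three sub-configs and serializing it back.
#
#     config = BlipaConfig.from_vision_qformer_text_configs(
#         BlipaVisionConfig(), BlipaQFormerConfig(), CONFIG_MAPPING["opt"]()
#     )
#     assert config.to_dict()["model_type"] == "blip-2"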
| 168 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def _lowerCamelCase ( self : List[Any] , __A : int , __A : Union[str, Any] , __A : Union[str, Any]=None , __A : List[Any]=1 , __A : Union[str, Any]="binary" , __A : Union[str, Any]=None ):
__UpperCamelCase = fa_score(
__A , __A , labels=__A , pos_label=__A , average=__A , sample_weight=__A )
return {"f1": float(__A ) if score.size == 1 else score}
| 53 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
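# A quick numeric sanity check of the rolling-hash update above (an illustrative
# helper, not part of the original module): sliding the window right by one drops
# the leading character's contribution and appends the new character in O(1).
def _rolling_hash_demo() -> None:
    b, m = alphabet_size, modulus
    h_abc = (ord("a") * b * b + ord("b") * b + ord("c")) % m
    # Drop 'a', shift by the base, add 'd': hash("bcd") derived from hash("abc").
    h_bcd = ((h_abc - ord("a") * (b * b) % m) * b + ord("d")) % m
    assert h_bcd == (ord("b") * b * b + ord("c") * b + ord("d")) % m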
def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
if __name__ == "__main__":
test_rabin_karp()
| 53 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'focalnet'

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names ) | 284 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
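# _import_structure maps each submodule to its public symbols so that _LazyModule
# can defer importing the heavy torch-backed modules until an attribute is first
# accessed.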
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 284 | 1 |
import os
import pytest
from attr import dataclass
A__ = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5_500,
}
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1_000}
@property
    def metric_definitions( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
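        # These regexes scrape metric values out of the SageMaker training logs;
        # the PyTorch and TensorFlow trainers print their metrics in different
        # formats, hence the two branches.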
@property
    def base_job_name( self ):
        """simple docstring"""
        return F'{self.framework}-transformers-test'
@property
    def test_path( self ):
"""simple docstring"""
return F'./tests/sagemaker/scripts/{self.framework}'
@property
    def image_uri( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env( request ):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 82 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
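# is_for_table decides whether a prime implicant covers a minterm: with `count`
# "don't care" underscores in the implicant, implicant and minterm must differ in
# exactly `count` positions, i.e. everywhere the implicant has "_" and nowhere else.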
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 355 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCAmelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=16 , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=14 , UpperCAmelCase=10 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 2, 3, 4, 5] , UpperCAmelCase=25 , UpperCAmelCase=5 , ) -> int:
_snake_case = d_model
_snake_case = parent
_snake_case = batch_size
_snake_case = prediction_length
_snake_case = context_length
_snake_case = cardinality
_snake_case = num_time_features
_snake_case = lags_sequence
_snake_case = embedding_dimension
_snake_case = is_training
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = context_length
_snake_case = prediction_length + label_length
_snake_case = label_length
_snake_case = moving_average
_snake_case = autocorrelation_factor
def lowercase (self ) -> str:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase (self , UpperCAmelCase ) -> Tuple:
_snake_case = config.context_length + max(config.lags_sequence )
_snake_case = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_snake_case = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_snake_case = floats_tensor([self.batch_size, _past_length] )
_snake_case = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_snake_case = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_snake_case = floats_tensor([self.batch_size, config.prediction_length] )
_snake_case = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowercase (self ) -> int:
_snake_case = self.get_config()
_snake_case = self.prepare_autoformer_inputs_dict(UpperCAmelCase )
return config, inputs_dict
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
_snake_case = AutoformerModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
_snake_case = model(**UpperCAmelCase )
_snake_case = outputs.encoder_last_hidden_state
_snake_case = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase )
_snake_case = AutoformerEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = model.create_network_inputs(**UpperCAmelCase )
_snake_case, _snake_case = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
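        # The decomposition layer splits the context window into a seasonal stream
        # and a trend stream (a moving average); Autoformer feeds the two streams
        # to the decoder separately rather than as one mixed sequence.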
_snake_case = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_snake_case = encoder(inputs_embeds=UpperCAmelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_snake_case = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_snake_case = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_snake_case = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_snake_case = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase )
_snake_case = AutoformerDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
_snake_case = decoder(
trend=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase_ = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase_ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> List[Any]:
_snake_case = AutoformerModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowercase (self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase (self ) -> Any:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase )
_snake_case, _snake_case = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def lowercase (self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowercase (self ) -> Tuple:
pass
def lowercase (self ) -> Any:
_snake_case = inspect.signature(getattr(UpperCAmelCase , """forward""" ) )
# The main input is the name of the argument after `self`
_snake_case = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase )
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , """seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """decoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """encoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """d_model""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """num_attention_heads""" , UpperCAmelCase )
_snake_case = d_model // num_attention_heads
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_snake_case = len(UpperCAmelCase )
_snake_case = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# decoder attentions
_snake_case = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_snake_case = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase (self ) -> List[Any]:
super().test_retain_grad_hidden_states_attentions()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE="train-batch.pt" ):
_snake_case = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
_snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Union[str, Any]:
_snake_case = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch()
with torch.no_grad():
_snake_case = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_snake_case = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> str:
_snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_snake_case = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_snake_case = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> Optional[int]:
_snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_snake_case = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_snake_case = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCAmelCase )
_snake_case = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCAmelCase )
_snake_case = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase , rtol=1e-1 ) ) | 270 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
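# A constant-space alternative (an illustrative sketch, not part of the original
# API): Floyd's cycle detection advances a slow and a fast pointer and reports a
# loop if they ever meet, using O(1) memory instead of the visited list above.
def _has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False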
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 221 | """simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=64 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> List[Any]:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self ) -> str:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ) -> List[str]:
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = GPTNeoXModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
A__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = True
A__ = GPTNeoXModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = True
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,use_cache=__UpperCAmelCase )
A__ = outputs.past_key_values
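        # Reusing `past_key_values` lets the follow-up forward pass attend over the
        # cached keys/values instead of re-encoding the whole prefix; only the newly
        # appended tokens are processed from scratch.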
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
A__ = torch.cat([input_mask, next_mask] ,dim=-1 )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,past_key_values=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-3 ) )
def snake_case__ ( self ) -> Dict:
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__( __A , __A , __A , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : List[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : List[str] = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : str = False
def snake_case__ ( self ) -> Tuple:
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=64 ,num_attention_heads=8 )
def snake_case__ ( self ) -> str:
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Dict:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> str:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def snake_case__ ( self ) -> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self ) -> str:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 10] ,config.vocab_size )
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ = GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
A__ = original_model(__UpperCAmelCase ).last_hidden_state
A__ = original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 1_0.0}
A__ = GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
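        # Background (illustrative note): with linear RoPE scaling the rotary
        # position indices are divided by `factor` for every input, so even short
        # sequences get different embeddings; dynamic NTK scaling only rescales once
        # the input grows past the original `max_position_embeddings`, which is why
        # the short-input outputs match for "dynamic" but not for "linear" above.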
@require_torch
class UpperCamelCase__( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> int:
A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__UpperCAmelCase )
A__ = tokenizer('My favorite food is' ,return_tensors='pt' ).to(__UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A__ = model.generate(**__UpperCAmelCase ,do_sample=__UpperCAmelCase ,max_new_tokens=20 )
A__ = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
| 221 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
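        # Shape note: the 16x16 `image` stands in for the low-resolution stage-1
        # output, while `original_image` and `mask_image` are built at the 32x32
        # super-resolution target size.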
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _snake_case ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _snake_case ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _snake_case ( self ):
"""simple docstring"""
self._test_save_load_local()
def _snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 264 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__( self , num_of_nodes ):
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self , u_node , v_node , weight ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ):
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ):
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def snake_case_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
# NOTE: function and parameter names restored from the call site
# `create_rename_keys(config, base_model)` further below.
def create_rename_keys( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
# NOTE: the signature and the state-dict target keys below were lost to obfuscation;
# they are reconstructed from the call site `read_in_q_k_v(state_dict, config,
# base_model)` and from the standard ViT key layout.
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
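def _qkv_split_example():
    # Hypothetical sketch (not part of the original conversion script): timm stores
    # query, key and value as one fused projection of shape (3 * hidden, hidden);
    # read_in_q_k_v slices it into three (hidden, hidden) blocks in q/k/v order,
    # exactly like the toy split below.
    hidden = 4
    fused = torch.arange(3 * hidden * hidden , dtype=torch.float32 ).reshape(3 * hidden , hidden )
    q = fused[:hidden, :]
    k = fused[hidden : hidden * 2, :]
    v = fused[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v] ) , fused )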
def remove_classification_head_( state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
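def _rename_key_example():
    # Hypothetical illustration (not part of the original script): every (src, dest)
    # pair from create_rename_keys is applied by rename_key, which pops the old key
    # and re-inserts its value under the new Hugging Face name, e.g.:
    example = {'cls_token': 1}
    rename_key(example , 'cls_token' , 'vit.embeddings.cls_token' )
    assert example == {'vit.embeddings.cls_token': 1}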
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
lowercase = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=__SCREAMING_SNAKE_CASE , )
lowercase = ViTHybridConfig(backbone_config=__SCREAMING_SNAKE_CASE , image_size=384 , num_labels=1000 )
lowercase = False
# load original model from timm
lowercase = timm.create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase = timm_model.state_dict()
if base_model:
remove_classification_head_(__SCREAMING_SNAKE_CASE )
lowercase = create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = 'huggingface/label-files'
lowercase = 'imagenet-1k-id2label.json'
lowercase = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase = ViTHybridModel(__SCREAMING_SNAKE_CASE ).eval()
else:
lowercase = ViTHybridForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# create image processor
lowercase = create_transform(**resolve_data_config({} , model=__SCREAMING_SNAKE_CASE ) )
lowercase = transform.transforms
lowercase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
lowercase = ViTHybridImageProcessor(
do_resize=__SCREAMING_SNAKE_CASE , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__SCREAMING_SNAKE_CASE , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=__SCREAMING_SNAKE_CASE , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase = prepare_img()
lowercase = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
lowercase = processor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# verify logits
with torch.no_grad():
lowercase = model(__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
lowercase = timm_model.forward_features(__SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1e-3 )
else:
lowercase = timm_model(__SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 195 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A_ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
return None
class A_ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
return None
class A_ ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , 'tf' , 12 , **snake_case )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , 'pt' , 12 , **snake_case )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(snake_case ) )
vocab_file.flush()
lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase = BertModel(BertConfig(vocab_size=len(snake_case ) ) )
model.save_pretrained(snake_case )
self._test_export(snake_case , 'pt' , 12 , snake_case )
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase = self._test_export(snake_case , 'tf' , 12 , **snake_case )
lowercase = quantize(Path(snake_case ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase = self._test_export(snake_case , 'pt' , 12 , **snake_case )
lowercase = quantize(snake_case )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase = Path(snake_case ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case )
return path
except Exception as e:
self.fail(snake_case )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'pt' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import TFBertModel
lowercase = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'tf' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = FeatureExtractionPipeline(snake_case , snake_case )
lowercase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase , lowercase , lowercase , lowercase = infer_shapes(snake_case , snake_case )
# Assert all variables are present
self.assertEqual(len(snake_case ) , len(snake_case ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , snake_case )
self.assertSequenceEqual(variable_names[3:] , snake_case )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase , lowercase = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case ) , set(snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase , lowercase = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case )
        # Should have exactly one arg (everything before the argument that was not provided, "some_other_args")
self.assertEqual(len(snake_case ) , 1 )
self.assertEqual(len(snake_case ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 195 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,**__lowerCamelCase : List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCamelCase ).tokenizer
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,**__lowerCamelCase : Optional[int] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCamelCase ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,**__lowerCamelCase : Optional[int] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCamelCase ).qformer_tokenizer
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
a = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
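        # np.moveaxis converts the channel-first (3, 30, 400) arrays to the
        # channel-last (30, 400, 3) layout that PIL expects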
a = [Image.fromarray(np.moveaxis(__lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__lowerCamelCase ,padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=__lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase ,qformer_tokenizer=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase ,return_tensors='''np''' )
a = processor(images=__lowerCamelCase ,return_tensors='''np''' )
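        # the processor should delegate image handling to the image processor,
        # so both outputs must contain numerically identical features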
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase ,qformer_tokenizer=__lowerCamelCase )
a = 'lower newer'
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase ,return_token_type_ids=__lowerCamelCase )
a = qformer_tokenizer(__lowerCamelCase ,return_token_type_ids=__lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor['''qformer_''' + key] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase ,qformer_tokenizer=__lowerCamelCase )
a = 'lower newer'
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase ,images=__lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,)
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase ,qformer_tokenizer=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase ,qformer_tokenizer=__lowerCamelCase )
a = 'lower newer'
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase ,images=__lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,)
| 351 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
a = '''The dog is cute and lives in the garden house'''
a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
a = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
a = model(__lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
| 330 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( density , bulk_modulus ) -> float:
    """
    >>> SCREAMING_SNAKE_CASE__(density=4, bulk_modulus=16)
    2.0
    >>> SCREAMING_SNAKE_CASE__(density=0, bulk_modulus=16)
    Traceback (most recent call last):
        ...
    ValueError: Impossible fluid density
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
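# For reference (approximate figures, not from the original file): water has a
# density of ~998 kg/m^3 and a bulk modulus of ~2.1e9 Pa, which gives a speed of
# sound of roughly 1.45e3 m/s.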
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : Dict=30 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=32 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : Optional[int]=37 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=10 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=2 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 2
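        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so the sequence length is 225 + 2 = 227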
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Tuple ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = TFDeiTModel(config=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = TFDeiTForMaskedImageModeling(config=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = TFDeiTForMaskedImageModeling(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassification(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassification(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowercase_ = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Dense ) )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any]=False ):
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFDeiTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def UpperCAmelCase_ ( ) -> str:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCAmelCase , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 225 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Any = 0
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCAmelCase ) , 0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
# Check that tokenizer_type ≠ model_type
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase , config=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowerCAmelCase , """vocab.txt""" ) )
__lowerCAmelCase : Any = AutoTokenizer.from_pretrained(lowerCAmelCase , tokenizer_type="""bert""" , use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowerCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowerCAmelCase , """merges.txt""" ) )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase , tokenizer_type="""gpt2""" , use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowerCAmelCase , """vocab.txt""" ) )
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase , tokenizer_type="""bert""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowerCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowerCAmelCase , """merges.txt""" ) )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
with pytest.raises(lowerCAmelCase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowerCAmelCase : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCAmelCase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowerCAmelCase : List[str] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = TOKENIZER_MAPPING.values()
__lowerCAmelCase : Dict = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowerCAmelCase ) , lowerCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowerCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowerCAmelCase )
__lowerCAmelCase : str = """Hello, world. How are you?"""
__lowerCAmelCase : List[Any] = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Any = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = get_tokenizer_config("""bert-base-cased""" )
__lowerCAmelCase : Any = config.pop("""_commit_hash""" , lowerCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCAmelCase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowerCAmelCase : List[Any] = get_tokenizer_config(lowerCAmelCase )
self.assertDictEqual(lowerCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Tuple = get_tokenizer_config(lowerCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
__lowerCAmelCase : Tuple = CustomTokenizer.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
# Can register in two steps
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCAmelCase , fast_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase , fast_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoTokenizer.register(lowerCAmelCase , fast_tokenizer_class=lowerCAmelCase )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast
            # converter for our new tokenizer and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(lowerCAmelCase )
bert_tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = CustomTokenizerFast.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase )
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : int =False
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : Dict =NewTokenizer
lowerCamelCase : Any =False
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase , fast_tokenizer_class=lowerCAmelCase )
# If remote code is not set, the default is to use local
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowerCAmelCase : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(lowerCAmelCase , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
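        # the second from_pretrained call below should be served from the local cache,
        # so only a single HEAD request (the revision check) is expected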
with RequestCounter() as counter:
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 139 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowerCAmelCase : List[Any] = model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowerCAmelCase : Tuple = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : List[Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = MobileBertForNextSentencePrediction(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : List[Any] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertForPreTraining(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Optional[int] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , next_sentence_label=lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : int = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_labels
__lowerCAmelCase : int = MobileBertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Optional[int] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Dict = MobileBertForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Tuple = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.num_choices
__lowerCAmelCase : List[Any] = MobileBertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
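

# Illustrative helper (an addition, not part of the original test): the same
# ratio-based comparison as above, packaged as a reusable check for tensors
# whose values span many orders of magnitude, where an absolute-difference
# comparison would be meaningless.
def _ratio_close(expected , actual , tolerance=TOLERANCE ):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance ) and torch.all(ratio <= 1 + tolerance ) )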
| 139 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''yjernite/retribert-base-uncased''': (
        '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
    ),
}


class RetriBertConfig( PretrainedConfig ):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
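

# Usage sketch (illustrative, not from the original file): constructing the
# config with a custom projection head before building a retrieval model.
#   config = RetriBertConfig(projection_dim=256 , share_encoders=False )
#   assert config.model_type == "retribert"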
| 119 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()

    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            titles = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(titles ) + '\n' )


if __name__ == "__main__":
    main()
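
# Usage sketch (script and output file names are illustrative): each input
# record looks like
#   {"question": "...", "positive_ctxs": [{"title": "...", ...}, ...]}
# and the script writes one question per line to --evaluation_set and the
# tab-joined positive-context titles per line to --gold_data_path:
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq-dev.questions \
#       --gold_data_path nq-dev.gold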
| 119 | 1 |
import inspect
import unittest
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: str ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # map optional-backend module names to their pip package names
                    if backend == "k_diffusion":
                        backend = """k-diffusion"""
                    elif backend == "invisible_watermark":
                        backend = """invisible-watermark"""
                    assert backend in deps, F'{backend} is not in the deps table!'
| 355 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests (PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str]=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
_SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def UpperCamelCase ( self: int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = """a hat"""
_SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
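

# Flow sketch (illustrative summary of the test above): Kandinsky 2.2
# inpainting is two-stage. The prior pipeline maps the text prompt to image
# embeddings, which the decoder pipeline then combines with the source image
# and its mask:
#   image_emb, zero_image_emb = pipe_prior(prompt , ... ).to_tuple()
#   result = pipeline(image=init_image , mask_image=mask ,
#                     image_embeds=image_emb , negative_image_embeds=zero_image_emb , ... )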
| 125 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_a , _a , _a = False, False, False
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """dict"""
SCREAMING_SNAKE_CASE__ : Dict = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = field(default="""Audio""" ,init=__a ,repr=__a )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ : Optional[int] = BytesIO()
sf.write(_UpperCamelCase , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ : Dict = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
UpperCAmelCase_ : List[str] = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2767
UpperCAmelCase_ : Any = BytesIO(bytes() )
sf.write(_UpperCamelCase , _UpperCamelCase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ : Any = xsplitext(_UpperCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ : List[str] = token_per_repo_id or {}
UpperCAmelCase_ : Optional[Any] = path.split("::" )[-1]
try:
UpperCAmelCase_ : List[Any] = string_to_dict(_UpperCamelCase , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ : Optional[Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ : str = None
with xopen(_UpperCamelCase , "rb" , use_auth_token=_UpperCamelCase ) as f:
UpperCAmelCase_ , UpperCAmelCase_ : str = sf.read(_UpperCamelCase )
else:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = sf.read(_UpperCamelCase )
UpperCAmelCase_ : str = array.T
if self.mono:
UpperCAmelCase_ : Optional[int] = librosa.to_mono(_UpperCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ : Any = librosa.resample(_UpperCamelCase , orig_sr=_UpperCamelCase , target_sr=self.sampling_rate )
UpperCAmelCase_ : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Optional[int] = pa.array([None] * len(_UpperCamelCase ) , type=pa.binary() )
UpperCAmelCase_ : int = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : str = pa.array([None] * len(_UpperCamelCase ) , type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ : Optional[int] = pa.array([Audio().encode_example(_UpperCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ : List[str] = storage.field("bytes" )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_UpperCamelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ : List[Any] = storage.field("path" )
else:
UpperCAmelCase_ : Dict = pa.array([None] * len(_UpperCamelCase ) , type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(_UpperCamelCase , self.pa_type )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(lowercase_ ):
with xopen(_UpperCamelCase , "rb" ) as f:
UpperCAmelCase_ : Tuple = f.read()
return bytes_
UpperCAmelCase_ : List[str] = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ : int = pa.array(
[os.path.basename(_UpperCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(_UpperCamelCase , self.pa_type )
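

# Sketch of the PCM normalization used in encode_example above (illustrative,
# using standard numpy dtype names): 16-bit PCM samples are mapped into
# [-1, 1] by dividing by the int16 maximum.
#   pcm = np.frombuffer(raw_bytes , dtype=np.int16 ).astype(np.float32 ) / 32767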
| 61 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
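

# Sanity-check sketch (an addition, not part of the original module): for
# non-negative integers, decimal_to_octal should agree with Python's built-in
# oct(), e.g. decimal_to_octal(65) == oct(65) == "0o101".
def _check_against_builtin(num: int) -> bool:
    return decimal_to_octal(num) == oct(num)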
| 231 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module object for a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all model test classes in a model test file."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class used by a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that cover them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read: class objects become their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
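

# Illustrative usage (the test file path is an assumed example, run from the
# repository root where `tests/models/...` exists):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   get_module_path(test_file)   # -> "tests.models.bert.test_modeling_bert"
#   print(to_json(get_test_to_tester_mapping(test_file)))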
| 269 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self) -> str:
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
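

# Behavioural note (an added observation, derivable from the demos above):
# FixedPriorityQueue dequeues FIFO within the highest non-empty priority
# (lower index = higher priority), so the demo yields 10, 100, 128
# (priority 0) before any priority-1 item, while ElementPriorityQueue always
# removes the smallest remaining element.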
| 269 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _A ( unittest.TestCase ):
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : int = 0
def __a ( self : Tuple ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_A , _A )
def __a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any = Path(_A ) / '''preprocessor_config.json'''
lowercase : List[str] = Path(_A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
lowercase : str = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[str] = Path(_A ) / '''preprocessor_config.json'''
lowercase : Union[str, Any] = Path(_A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
lowercase : Dict = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Union[str, Any] = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowercase : Optional[int] = Path(_A ) / '''preprocessor_config.json'''
lowercase : Any = Path(_A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : List[Any] = AutoImageProcessor.from_pretrained(_A ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str = CLIPImageProcessor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
lowercase : Dict = AutoImageProcessor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
lowercase : Tuple = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_A , _A )
def __a ( self : str ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] = Path(_A ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
lowercase : str = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_A , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_A , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : int = AutoImageProcessor.from_pretrained(_A , revision='''aaaaaa''' )
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_A ):
lowercase : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
lowercase : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
lowercase : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A )
lowercase : Tuple = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __a ( self : List[Any] ) -> Dict:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _A )
AutoImageProcessor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoImageProcessor.register(_A , _A )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] = Path(_A ) / '''preprocessor_config.json'''
lowercase : Dict = Path(_A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
lowercase : List[str] = CustomImageProcessor.from_pretrained(_A )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A )
lowercase : List[Any] = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
class _A ( __snake_case ):
_UpperCamelCase : Union[str, Any] = True
try:
AutoConfig.register('''custom''' , _A )
AutoImageProcessor.register(_A , _A )
# If remote code is not set, the default is to use local
lowercase : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_A , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 308 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self) -> Optional[Any]:
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
with torch.no_grad():
_snake_case = MaskFormerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
def comm_check_on_output(UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
_snake_case = model(
pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self) -> int:
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def lowercase (self ) -> int:
self.config_tester.run_common_tests()
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase )
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 341 | 0 |
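# Beyond the raw-logit assertions above, MaskFormer outputs are normally turned
# into a per-pixel label map through the image processor. A minimal sketch of
# that step (not part of the original test file; it reuses the same checkpoint
# and fixture image, and `post_process_semantic_segmentation` is the standard
# transformers helper for collapsing the per-query masks and classes):
#
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
#   inputs = processor(image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   semantic_map = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]
#   # semantic_map is a (height, width) tensor of class ids, e.g. (480, 640) here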
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output

        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
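
# A minimal predictor-corrector sampling-loop sketch using only the methods
# defined above. `model` stands in for any score network mapping
# (sample, t) -> score estimate; it is a placeholder, not part of this file.
def sample_sde_ve(model, num_inference_steps=100, shape=(1, 3, 32, 32)):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)

    sample = torch.randn(*shape) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # corrector: `correct_steps` Langevin MCMC updates at the current noise level
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(model(sample, t), sample).prev_sample
        # predictor: one reverse-SDE step toward lower noise
        out = scheduler.step_pred(model(sample, t), t, sample)
        sample = out.prev_sample
    return sample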
| 359 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a SwitchTransformers model.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        use_cache=True,
        add_router_probs=False,
        is_encoder_decoder=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
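

# Small sketch of how the sparse-layer bookkeeping above plays out with the
# defaults: 12 layers with 3 sparse encoder layers gives a sparse MoE block
# every 4th layer. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    config = SwitchTransformersConfig()  # defaults mirror google/switch-base-8
    print(config.num_layers, config.num_sparse_encoder_layers)  # 12 3
    print(config.encoder_sparse_step)  # 4 -> every 4th encoder layer is sparse
    print(config.num_experts, config.expert_capacity)  # 8 64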
| 66 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
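

def _average_params_across_workers(model):
    # Conceptual sketch only (not the actual accelerate implementation): every
    # `local_sgd_steps` optimizer steps, LocalSGD replaces each parameter with
    # its mean across workers, instead of syncing gradients on every step.
    import torch.distributed as dist

    world_size = dist.get_world_size()
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size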
| 324 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
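

# All of the tests above share one mechanism: they fake a command line by
# patching `sys.argv` and calling the example script's `main()`. A stripped
# down, reusable sketch of that pattern (the commented call is illustrative):
def fake_cli_run(main_fn, argv_string):
    testargs = argv_string.split()
    with patch.object(sys, "argv", testargs):
        main_fn()


# fake_cli_run(run_flax_glue.main, "run_glue.py --output_dir /tmp/out --seed=42 ...")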
| 324 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
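

# Layout sketch for `build_inputs_with_special_tokens` above: for a single
# sequence the output is `<s> A </s>`, and for a pair `<s> A </s></s> B </s>`.
# The token ids below are made up for illustration; only the placement of the
# cls/sep ids matters.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2  # hypothetical ids for <s> and </s>
    single = [cls_id] + [10, 11] + [sep_id]
    pair = [cls_id] + [10, 11] + [sep_id, sep_id] + [20] + [sep_id]
    print(single)  # [0, 10, 11, 2]
    print(pair)    # [0, 10, 11, 2, 2, 20, 2]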
| 364 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
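
# The strings passed to `set_scheduler` above ("sample_euler",
# "sample_dpmpp_2m") name samplers from the k-diffusion library. A hedged
# usage sketch outside the test harness, mirroring the last test (requires a
# GPU and downloads the checkpoint, hence commented out):
#
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
#   pipe = pipe.to("cuda")
#   pipe.set_scheduler("sample_dpmpp_2m")  # k-diffusion sampler name
#   image = pipe(
#       "A painting of a squirrel eating a burger",
#       generator=torch.manual_seed(0),
#       num_inference_steps=15,
#       use_karras_sigmas=True,  # Karras et al. noise schedule, as in the test above
#   ).images[0]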
| 39 | 0 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
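
# Programmatic invocation sketch, equivalent to the CLI entry point above.
# All paths below are illustrative placeholders, not real checkpoints:
#
#   convert_s3prl_checkpoint(
#       base_model_name="facebook/wav2vec2-base",
#       config_path="./config",          # directory containing the classifier config.json
#       checkpoint_path="./s3prl.ckpt",  # downstream checkpoint exported by s3prl
#       model_dump_path="./converted",   # where the HF model + feature extractor are saved
#   )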
| 124 |
import math
def proth(number: int) -> int:
    """Return the nth Proth number (sequence 3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers have the form k * 2^n + 1 with odd k < 2^n;
        # the list is built block by block, doubling the block length each time.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
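
    # Sanity-check sketch: every Proth number is k * 2^n + 1 with odd k < 2^n.
    def is_proth(p: int) -> bool:
        m = p - 1
        n = (m & -m).bit_length() - 1  # exponent of the largest power of 2 dividing m
        k = m >> n
        return k % 2 == 1 and k < 2**n

    assert all(is_proth(proth(i)) for i in range(1, 20))
    print([proth(i) for i in range(1, 8)])  # [3, 5, 9, 13, 17, 25, 33]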
| 124 | 1 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
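
    # Quick numerical check (assumption-laden sketch): for f(x) = x^2 on [0, 1]
    # the exact integral is 1/3, and with h = 0.1 the trapezoid estimate is
    # approximately 0.335, i.e. an error on the order of h^2. Note that the
    # `while x < (b - h)` bound in make_points relies on floating-point
    # accumulation to include the last interior point near b - h.
    estimate = method_1([0.0, 1.0], 10.0)
    print(estimate)               # ~0.335
    print(abs(estimate - 1 / 3))  # ~1.7e-3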
| 359 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
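

# Unlike BERT-style tokenizers, XLNet appends <sep>/<cls> at the *end* and pads
# on the left (padding_side = "left"). A layout sketch of what the three
# methods above produce, with illustrative token placeholders:
#
#   single: A1 A2 <sep> <cls>            -> token type ids [0, 0, 0, 2]
#   pair:   A1 A2 <sep> B1 <sep> <cls>   -> token type ids [0, 0, 0, 1, 1, 2]
#
# The trailing 2 is the dedicated cls segment id (SEG_ID_CLS above).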
| 48 | 0 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_A = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__UpperCAmelCase )
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
def extract(*__UpperCAmelCase : str , **__UpperCAmelCase : List[Any] ):
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
_A = torch.ones([0] )
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
self.pixel_values.to(__UpperCAmelCase )
return self
return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_A = 77
_A = self.dummy_image.to(__UpperCAmelCase )
# put models in fp16
_A = unet.half()
_A = vae.half()
_A = bert.half()
# make sure here that pndm scheduler skips prk
_A = AltDiffusionImgaImgPipeline(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , )
_A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase )
_A = alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = "A painting of a squirrel eating a burger"
_A = torch.manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=__UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_A = init_image.resize((760, 504) )
_A = "BAAI/AltDiffusion"
_A = AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A fantasy landscape, trending on artstation"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images[0]
_A = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_A = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 79 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
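
# Illustrative note (added; not from the original script): a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry of
# MAPPING above, and the "*" placeholder in the mapped name is later replaced with the
# layer index, yielding "wav2vec2.encoder.layers.3.attention.k_proj.weight".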
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
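
# Sketch of the expected input format (an assumption for illustration, not from the
# original script): each non-empty line starts with a label name, so a file containing
#     down 0
#     up 1
# yields {0: "down", 1: "up"}, i.e. an id2label mapping keyed by line number; it is
# used below when converting a sequence-classification checkpoint.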
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights to the transformers design.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.id2label = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
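
# Illustrative invocation of this script (all paths are placeholders, not from the
# original):
#   python this_script.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned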
| 347 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase="wiki40b" , lowerCAmelCase="dense" , lowerCAmelCase=10 ):
'''simple docstring'''
if source == "none":
UpperCAmelCase , UpperCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCAmelCase , UpperCAmelCase = query_qa_dense_index(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase , UpperCAmelCase = query_es_index(
lowerCAmelCase , lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=lowerCAmelCase , )
UpperCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
UpperCAmelCase = """question: {} context: {}""".format(lowerCAmelCase , lowerCAmelCase )
return question_doc, support_list
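
# Illustrative output (assumed values, for clarity only): for the question
# "Why is the sky blue?" the returned question_doc looks like
#   "question: Why is the sky blue? context: <P> first passage text <P> second passage text"
# and support_list holds (article_title, section_title, score, passage_text) tuples.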
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
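
# Note (added for clarity; an interpretation, not original text): the hash_funcs
# above map torch tensors and the BART tokenizer to a constant, which effectively
# tells st.cache to skip hashing those argument types between reruns -- hashing a
# model's weight tensors on every interaction would be prohibitively slow.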
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '''
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
    '''
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '''
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    '''
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == 'beam':
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
    'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == 'sampled'),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('### The model generated answer is:')
        st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
lowerCAmelCase_ : List[Any] = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
lowerCAmelCase_ : Union[str, Any] = res[1].strip()
if sec_titles == "":
lowerCAmelCase_ : List[str] = '''[{}]({})'''.format(res[0], wiki_url)
else:
lowerCAmelCase_ : int = sec_titles.split(''' & ''')
lowerCAmelCase_ : Optional[Any] = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
        )
        answers_st = [
            '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
            for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
            if i == 0 or sc > 2
        ]
        st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 248 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
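
# Acceptance rule used above (the Metropolis criterion): a neighbor that worsens the
# score by `change` < 0 is still accepted with probability e ** (change / current_temp),
# so the search roams freely at high temperature and degenerates to greedy hill
# climbing as the temperature decays toward threshold_temp.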
if __name__ == "__main__":
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[str] = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : int = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : Optional[Any] = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'{local_min.score()}'
)
lowerCAmelCase_ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'{local_min.score()}'
)
| 248 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
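
# Usage sketch, mirroring the example in _KWARGS_DESCRIPTION above:
#   metric = datasets.load_metric("competition_math")
#   results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   assert results == {"accuracy": 1.0}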
| 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
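
# Usage sketch, mirroring Example 1 in _KWARGS_DESCRIPTION above (inputs are
# pre-tokenized lists of tokens):
#   google_bleu = datasets.load_metric("google_bleu")
#   results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
#   round(results["google_bleu"], 2)  # 0.44 for the docstring's first example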
| 234 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance used during conversion validation
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)
def a__( self : int , lowerCAmelCase : "PreTrainedTokenizerBase" , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor=lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
UpperCAmelCase = 1
return inputs
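
# Minimal usage sketch (values are illustrative, not from the original file):
#   config = LongformerConfig(attention_window=256, num_hidden_layers=6)
#   onnx_config = LongformerOnnxConfig(config)
#   list(onnx_config.inputs.keys())  # ["input_ids", "attention_mask", "global_attention_mask"]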
| 91 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 91 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 141 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
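
# Reading the tables above (illustrative): distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11] -- first, middle, last -- and
# intermediate supervision for that student comes from teacher layers [3, 7, 11].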
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 152 | 0 |
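# Usage sketch (hedged: the teacher checkpoint and layer counts are only illustrative):
#
#     student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#         "facebook/bart-large-cnn", save_path="student_3_3", e=3, d=3
#     )
#     # e_copied / d_copied record which teacher layers seeded the student's encoder/decoder.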
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
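# Worked example, traced through the rules above:
#     rename_key("img_encoder.layers.0.blocks.1.attn.proj.weight")
# returns "vision_model.encoder.stages.0.layers.1.self_attn.out_proj.weight"
# (img_encoder.layers -> vision_model.encoder.stages, blocks -> layers,
# attn -> self_attn, then proj -> out_proj because the name now contains self_attn).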
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # target names follow the CLIP-style q/k/v projections used by the rename rules above
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
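# The qkv branch above relies on the standard fused-projection layout: a weight of shape
# (3 * dim, dim) stacks the query, key and value projections row-wise. A minimal,
# self-contained sketch of the same slicing (illustrative only, not part of the script):
#
#     import torch
#     dim = 4
#     qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v], dim=0), qkv)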
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
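# Usage sketch (hedged; constructing with no arguments builds the default Swin backbone
# config in memory, no download involved):
#
#     config = Mask2FormerConfig(num_queries=50)
#     as_dict = config.to_dict()
#     assert as_dict["model_type"] == "mask2former" and as_dict["num_queries"] == 50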
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
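# Behavior sketch: with sys.modules[__name__] replaced by _LazyModule, importing this
# package only records _import_structure; the heavy torch/TF submodules are imported on
# first attribute access, e.g.
#
#     from transformers.models.rag import RagConfig  # loads configuration_rag only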
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
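# Illustration: sorts in place and returns the comparison count (which varies with the
# random pivots chosen), e.g.
#
#     demo = np.array([3.0, 1.0, 2.0])
#     comparisons = _in_place_quick_sort(demo, 0, len(demo) - 1)
#     # demo is now [1.0, 2.0, 3.0]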
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Dict = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_A: Optional[int] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_A: Optional[int] = tokenizer_r.do_lower_case if hasattr(__a , '''do_lower_case''' ) else False
_A: Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
"""simple docstring"""
_A: Union[str, Any] = ['的', '人', '有']
_A: List[str] = ''.join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: Any = True
_A: Any = self.tokenizer_class.from_pretrained(__a , **__a )
_A: Any = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Tuple = tokenizer_p.encode(__a , add_special_tokens=__a )
_A: Union[str, Any] = tokenizer_r.encode(__a , add_special_tokens=__a )
_A: Dict = tokenizer_r.convert_ids_to_tokens(__a )
_A: Optional[Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_A: Union[str, Any] = False
_A: List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Optional[int] = self.tokenizer_class.from_pretrained(__a , **__a )
_A: List[str] = tokenizer_r.encode(__a , add_special_tokens=__a )
_A: List[str] = tokenizer_p.encode(__a , add_special_tokens=__a )
_A: str = tokenizer_r.convert_ids_to_tokens(__a )
_A: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_A: Optional[Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
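# End-to-end sketch (hedged: needs the GPU models and indexes loaded above; the question
# is just an example):
#
#     question_doc, support_list = make_support("why is the sky blue?", source="wiki40b", method="dense")
#     answer, _ = answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)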
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
__lowercase : int = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Any = st.text_input('Enter your question here:', '')
else:
__lowercase : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowercase : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : str = support_list[:10]
__lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowercase : Any = res[1].strip()
if sec_titles == "":
__lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
__lowercase : Union[str, Any] = sec_titles.split(' & ')
__lowercase : str = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : str = find_nearest_training(question)
__lowercase : Optional[int] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowercase : Any = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
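# Illustration, grounded in the branches above: "ast-finetuned-audioset-10-10-0.4593"
# keeps the default strides and attaches the 527 AudioSet labels, while
# "ast-finetuned-speech-commands-v2" sets max_length=128 and uses the 35 Speech Commands labels.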
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection of each encoder layer into separate q/k/v matrices/vectors;
            # target names follow the ViT-style attention naming used by the HF AST implementation
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
snake_case_ = GPTNeoXJapaneseModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase )
snake_case_ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
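    # KV-cache consistency: feeding only the new tokens plus past_key_values must reproduce
    # the hidden states of a full-sequence forward pass over the concatenated input.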
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and masks
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append the new tokens to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
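    # Hook consumed by ModelTesterMixin's common tests: returns the config and a kwargs dict for model(**inputs_dict).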
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
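# Test class: most of the coverage comes from the common suites in ModelTesterMixin / PipelineTesterMixin.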
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
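    # Integration test: downloads the real abeja/gpt-neox-japanese-2.7b checkpoint and greedy-decodes
    # Japanese prompts; as a @slow test it only runs when RUN_SLOW=1 is set.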
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)