import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F

from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
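
# Minimal end-to-end usage sketch (added for illustration; not part of the test
# suite). It mirrors what the slow tests above exercise, with the same
# "cvssp/audioldm" checkpoint:
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]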


import gc
import random
import unittest

import numpy as np
import torch

from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
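
# Minimal usage sketch (added for illustration; not part of the test suite),
# following the integration test above; `init_image` is a placeholder PIL image:
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   out = pipe(prompt="A fantasy landscape, trending on artstation", image=init_image,
#              strength=0.75, guidance_scale=7.5, output_type="np")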


""" AutoFeatureExtractor class."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """
        Register a new feature extractor for this class.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
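
# Usage sketch (added for illustration; not part of the module). Per the
# mapping above, a checkpoint whose config says `model_type="wav2vec2"`
# resolves to a Wav2Vec2FeatureExtractor; "facebook/wav2vec2-base-960h" is just
# an example hub id:
#
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")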


"""
Project Euler Problem 43: https://projecteuler.net/problem=43

The number 1406357289 is a 0 to 9 pandigital number with an interesting
sub-string divisibility property: d2d3d4 is divisible by 2, d3d4d5 by 3,
d4d5d6 by 5, d5d6d7 by 7, d6d7d8 by 11, d7d8d9 by 13 and d8d9d10 by 17.
Find the sum of all 0 to 9 pandigital numbers with this property.
"""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """
    Check whether the digit tuple of a 0 to 9 pandigital number satisfies the
    sub-string divisibility property described above.
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """
    Return the sum of all 0 to 9 pandigital numbers with the sub-string
    divisibility property.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
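
    # Added sanity check (not in the original solution): Project Euler 43 cites
    # 1406357289 as a pandigital number with the sub-string divisibility
    # property, so the predicate must accept its digit tuple.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))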


# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)


import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass


import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
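
# Illustrative example (added; values assume the reconstruction above): padding
# two ragged label sequences on the right to length 4 with -1 gives
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]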

def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch


import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
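
# Minimal usage sketch (added for illustration; not part of the test suite):
# the save/load round trip the tests above rely on.
#
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   config.save_pretrained(tmp_dir)
#   reloaded = GenerationConfig.from_pretrained(tmp_dir)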


""" PyTorch UperNet model."""
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales, in_channels, channels, align_corners) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing decode head, combining a Pyramid Pooling Module
    with a Feature Pyramid Network on top of the backbone feature maps.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head applied to one of the intermediate
    backbone feature maps.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
        )
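
# Minimal inference sketch (added for illustration; not part of the module),
# based on the public UperNet docs; `image` is a placeholder PIL image:
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)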


import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the sigmoid function, mapping any real value into (0, 1)."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid output."""
    return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
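# A de-obfuscated, runnable reading of the class above (the variable names are my own
# interpretation of the anonymized code, not the original identifiers): a 3-4-3-1
# sigmoid network trained by plain gradient descent on the squared error.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(y):
    # derivative written in terms of the sigmoid output y = sigmoid(x)
    return y * (1 - y)

X = np.array(
    [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]],
    dtype=np.float64,
)
y = np.array([[0], [1], [1], [0], [1], [0], [0], [1]], dtype=np.float64)
rng = np.random.default_rng(0)
w1, w2, w3 = rng.random((3, 4)), rng.random((4, 3)), rng.random((3, 1))

for _ in range(10):
    # feedforward
    h1 = sigmoid(X @ w1)
    h2 = sigmoid(h1 @ w2)
    pred = sigmoid(h2 @ w3)
    # backpropagate the squared-error gradient layer by layer
    d3 = 2 * (y - pred) * sigmoid_derivative(pred)
    d2 = (d3 @ w3.T) * sigmoid_derivative(h2)
    d1 = (d2 @ w2.T) * sigmoid_derivative(h1)
    w3 += h2.T @ d3
    w2 += h1.T @ d2
    w1 += X.T @ d1

print(np.mean(np.square(y - pred)))  # loss after 10 iterations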
| 318 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : List[str] = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowercase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
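# _LazyModule above defers importing the heavy modeling code until a symbol is first
# accessed. A minimal, library-free sketch of the same idea (names are my own):
import importlib
import types

class SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        try:
            module_name = self._symbol_to_module[name]
        except KeyError:
            raise AttributeError(name) from None
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so the import runs only once
        return value

# e.g. SketchLazyModule("pkg", {"json": ["dumps"]}).dumps({"a": 1}) imports json lazily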
| 318 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
        # <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
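# A hedged end-to-end sketch of the tokenizer exercised by the tests above (requires
# network access and the sentencepiece package):
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
batch = tok(["To ensure a smooth flow of bank resolutions."], return_tensors="pt")
print(batch.input_ids)  # ends with id 1 (</s>); id 0 is <pad>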
| 318 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__lowercase : Any = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
__lowercase : int = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
__lowercase : Any = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def UpperCAmelCase__ (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Optional[Any] = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ : Union[str, Any] = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ : Optional[int] = evaluate(dataset=A , predictions=A )
return score
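# The docstring above already carries a complete usage example; restated as runnable
# code (older versions of the datasets library expose load_metric; newer releases
# moved metrics to the separate evaluate package):
import datasets

predictions = [{"prediction_text": ["The seller:"], "id": "example-id"}]
references = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": "example-id"}]
cuad_metric = datasets.load_metric("cuad")
print(cuad_metric.compute(predictions=predictions, references=references))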
| 318 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
    # create the processes
    # the first and last processes each have only one neighbor, so they are
    # created outside of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
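# The sequential odd-even transposition sort that the multiprocessing version above
# parallelizes, for comparison (each process above plays the role of one index i):
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):  # n phases guarantee the list is sorted
        # even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))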
| 318 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__lowercase : Optional[Any] = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__lowercase : List[str] = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowercase , _lowercase )
lowerCamelCase_ : List[Any] = calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : int = '''rougeLsum'''
lowerCamelCase_ : Optional[Any] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k]
lowerCamelCase_ : str = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = ['''rouge1''', '''rouge2''', '''rougeL''']
lowerCamelCase_ : List[Any] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase )
lowerCamelCase_ : Optional[int] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase )
assert score_sep == score_no_sep
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Tuple = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
lowerCamelCase_ : Tuple = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase ) == calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase )
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : List[Any] = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
lowerCamelCase_ : Dict = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
lowerCamelCase_ : Dict = calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] , newline_sep=_lowercase )['''rougeLsum''']
lowerCamelCase_ : int = calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
lowerCamelCase_ : str = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowercase , _lowercase )
lowerCamelCase_ : List[Any] = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=_lowercase )
assert isinstance(_lowercase , _lowercase )
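# The helpers exercised above wrap Google's rouge_score package; a direct, hedged
# sketch of the underlying call (API as documented for rouge_score; verify against
# your installed version):
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge2", "rougeLsum"], use_stemmer=True)
scores = scorer.score(
    target="Margot Frank, died in 1945, a month earlier than previously thought.",
    prediction="Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
)
print(scores["rouge2"].fmeasure)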
| 318 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
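# The conversion script above ends with a numerical parity check between the fairseq
# and Hugging Face models. That pattern generalizes to any weight-porting script; a
# minimal sketch:
import torch

def assert_outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> None:
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # expect ~1e-7 for a clean port
    if not torch.allclose(ours, theirs, atol=atol):
        raise Exception("Something went wRoNg")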
| 318 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase : int = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
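# The try/except blocks above gate torch-backed exports on dependency availability.
# A minimal, library-free sketch of that pattern (names are my own):
def sketch_is_torch_available() -> bool:
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

if sketch_is_torch_available():
    EXPORTS = ["Pix2StructForConditionalGeneration", "Pix2StructVisionModel"]
else:
    EXPORTS = []  # fall back to config/processor-only exports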
| 318 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
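# A hedged usage sketch of the attribute_map defined above: callers can read the
# generic names (hidden_size, num_hidden_layers, ...) and PretrainedConfig resolves
# them to the CTRL-specific attributes (n_embd, n_layer, ...). Uses the public
# CTRLConfig class from transformers, which this anonymized file corresponds to.
from transformers import CTRLConfig

config = CTRLConfig(n_embd=1280, n_layer=48)
print(config.hidden_size, config.num_hidden_layers)  # 1280 48, resolved via attribute_map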
| 318 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __lowercase ( _lowercase ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : torch.FloatTensor
class __lowercase ( _lowercase , _lowercase ):
lowerCamelCase : Dict = 1
@register_to_config
def __init__(self , A = 2_0_0_0 , A = 0.15 , A = 0.01 , A = 13_48.0 , A = 1E-5 , A = 1 , ):
# standard deviation of the initial noise distribution
lowerCamelCase_ : Tuple = sigma_max
        # settable values
lowerCamelCase_ : Optional[Any] = None
self.set_sigmas(A , A , A , A )
def UpperCAmelCase__ (self , A , A = None ):
return sample
def UpperCAmelCase__ (self , A , A = None , A = None ):
lowerCamelCase_ : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCamelCase_ : str = torch.linspace(1 , A , A , device=A )
def UpperCAmelCase__ (self , A , A = None , A = None , A = None ):
lowerCamelCase_ : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCamelCase_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCamelCase_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(A , A )
lowerCamelCase_ : Any = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCamelCase_ : Union[str, Any] = torch.exp(torch.linspace(math.log(A ) , math.log(A ) , A ) )
lowerCamelCase_ : Any = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCAmelCase__ (self , A , A ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCAmelCase__ (self , A , A , A , A = None , A = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCamelCase_ : List[str] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCamelCase_ : int = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we move them to CPU, which is the default with cuda
lowerCamelCase_ : Optional[Any] = timesteps.to(self.discrete_sigmas.device )
lowerCamelCase_ : int = self.discrete_sigmas[timesteps].to(sample.device )
lowerCamelCase_ : str = self.get_adjacent_sigma(A , A ).to(sample.device )
lowerCamelCase_ : Dict = torch.zeros_like(A )
lowerCamelCase_ : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCamelCase_ : List[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCamelCase_ : Union[str, Any] = diffusion.unsqueeze(-1 )
lowerCamelCase_ : Optional[Any] = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
lowerCamelCase_ : int = randn_tensor(
sample.shape , layout=sample.layout , generator=A , device=sample.device , dtype=sample.dtype )
lowerCamelCase_ : Tuple = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCamelCase_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=A , prev_sample_mean=A )
def UpperCAmelCase__ (self , A , A , A = None , A = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
lowerCamelCase_ : Any = randn_tensor(sample.shape , layout=sample.layout , generator=A ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCamelCase_ : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCamelCase_ : str = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCamelCase_ : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCamelCase_ : Union[str, Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCamelCase_ : List[Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCamelCase_ : Union[str, Any] = step_size.unsqueeze(-1 )
lowerCamelCase_ : Dict = sample + step_size * model_output
lowerCamelCase_ : Optional[int] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def UpperCAmelCase__ (self , A , A , A , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCamelCase_ : List[str] = timesteps.to(original_samples.device )
lowerCamelCase_ : int = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCamelCase_ : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(A ) * sigmas[:, None, None, None]
)
lowerCamelCase_ : Optional[Any] = noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
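# The variance-exploding sigma schedule built by set_sigmas above, written out on its
# own with the constructor defaults: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t,
# so t = 1 gives sigma_max and t -> 0 gives sigma_min.
import torch

sigma_min, sigma_max, num_steps, sampling_eps = 0.01, 1348.0, 2000, 1e-5
timesteps = torch.linspace(1, sampling_eps, num_steps)
sigmas = sigma_min * (sigma_max / sigma_min) ** timesteps
print(sigmas[0].item(), sigmas[-1].item())  # ~1348.0 down to ~0.01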
| 318 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = tempfile.mkdtemp()
lowerCamelCase_ : int = BlipImageProcessor()
lowerCamelCase_ : List[Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
lowerCamelCase_ : List[Any] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
lowerCamelCase_ : Union[str, Any] = InstructBlipProcessor(A , A , A )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ (self , **A ):
return AutoProcessor.from_pretrained(self.tmpdirname , **A ).tokenizer
def UpperCAmelCase__ (self , **A ):
return AutoProcessor.from_pretrained(self.tmpdirname , **A ).image_processor
def UpperCAmelCase__ (self , **A ):
return AutoProcessor.from_pretrained(self.tmpdirname , **A ).qformer_tokenizer
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ : int = self.get_image_processor(do_normalize=A , padding_value=1.0 )
lowerCamelCase_ : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
self.assertIsInstance(processor.qformer_tokenizer , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : str = self.get_qformer_tokenizer()
lowerCamelCase_ : Optional[Any] = InstructBlipProcessor(
tokenizer=A , image_processor=A , qformer_tokenizer=A )
lowerCamelCase_ : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Tuple = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = self.get_qformer_tokenizer()
lowerCamelCase_ : Any = InstructBlipProcessor(
tokenizer=A , image_processor=A , qformer_tokenizer=A )
lowerCamelCase_ : Union[str, Any] = '''lower newer'''
lowerCamelCase_ : Union[str, Any] = processor(text=A )
lowerCamelCase_ : List[Any] = tokenizer(A , return_token_type_ids=A )
lowerCamelCase_ : str = qformer_tokenizer(A , return_token_type_ids=A )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Dict = self.get_tokenizer()
lowerCamelCase_ : Tuple = self.get_qformer_tokenizer()
lowerCamelCase_ : Optional[Any] = InstructBlipProcessor(
tokenizer=A , image_processor=A , qformer_tokenizer=A )
lowerCamelCase_ : Any = '''lower newer'''
lowerCamelCase_ : int = self.prepare_image_inputs()
lowerCamelCase_ : Optional[Any] = processor(text=A , images=A )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Dict = self.get_tokenizer()
lowerCamelCase_ : List[str] = self.get_qformer_tokenizer()
lowerCamelCase_ : List[str] = InstructBlipProcessor(
tokenizer=A , image_processor=A , qformer_tokenizer=A )
lowerCamelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Optional[int] = processor.batch_decode(A )
lowerCamelCase_ : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : List[Any] = self.get_tokenizer()
lowerCamelCase_ : List[str] = self.get_qformer_tokenizer()
lowerCamelCase_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=A , image_processor=A , qformer_tokenizer=A )
lowerCamelCase_ : Optional[int] = '''lower newer'''
lowerCamelCase_ : Tuple = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
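# A hedged end-to-end sketch of the processor exercised by the tests above (the
# checkpoint id is illustrative; requires network access):
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']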
| 318 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class , feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
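# Usage sketch (added for illustration; the checkpoint name is an assumption — any repo
# whose preprocessor_config.json carries a `feature_extractor_type` key resolves the same
# way through the mappings above):
#
#     from transformers import AutoFeatureExtractor
#     extractor = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''')
#     print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor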
| 318 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification(self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=3_7 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt(self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
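# Added reference sketch: the hard-coded ids above correspond to the prompt
# "the president is" (per the inline comment in the test). Producing them with the
# matching tokenizer (assumes hub or cache access to the `openai-gpt` checkpoint):
#
#     from transformers import OpenAIGPTTokenizer
#     tokenizer = OpenAIGPTTokenizer.from_pretrained('''openai-gpt''')
#     input_ids = tokenizer('''the president is''', return_tensors='''pt''').input_ids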
| 318 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class ModelArguments:
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class DataTrainingArguments:
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self , features ):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
        return batch
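# Shape sketch of the flatten/pad/un-flatten dance above (illustrative sizes, not taken
# from the script): with a batch of 2 SWAG examples, 4 choices each, padded to length L,
#     flattened_features -> 2 * 4 = 8 dicts -> tokenizer.pad -> tensors of shape (8, L)
#     batch after v.view(batch_size, num_choices, -1) -> (2, 4, L), labels -> shape (2,)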
def main():
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
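    # Illustrative trace of the expansion above (made-up strings): one example with
    # sent1="A man sits", sent2="He" and endings e0..e3 becomes the four pairs
    #     ("A man sits", "He e0") ... ("A man sits", "He e3")
    # which are tokenized flat and regrouped in chunks of 4 by the final dict comprehension.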
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 318 | 1 |
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
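# Added illustration (not in the original file): for a Hermitian matrix the Rayleigh
# quotient lies between the smallest and largest eigenvalues for any nonzero v.
if __name__ == "__main__":
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    r = rayleigh_quotient(a , v ).item().real
    eigenvalues = np.linalg.eigvalsh(a )  # sorted ascending
    assert eigenvalues[0] - 1e-9 <= r <= eigenvalues[-1] + 1e-9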
| 318 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free path whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self , pos_x , pos_y , goal_x , goal_y , parent ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search(self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self , parent: Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path(self , node: Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search(self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self , fwd_node: Node , bwd_node: Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('''Unidirectional BFS computation time : ''', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('''Bidirectional BFS computation time : ''', bd_bfs_time)
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
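# Design note (added): list.pop(0) above is O(n) per dequeue; for larger grids,
# collections.deque gives O(1) pops from the left with no other change to the algorithm:
#
#     from collections import deque
#     self.node_queue = deque([self.start])      # in __init__
#     current_node = self.node_queue.popleft()   # in search()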
| 318 | 1 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str , second_str: str ) -> bool:
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''' , '''''' )
    second_str = second_str.replace(''' ''' , '''''' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int )
    # For each character in the input strings,
    # increment the count in the corresponding bucket
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = check_anagrams(input_a, input_b)
    print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
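# Equivalent formulation (added for comparison): after the same lower/strip/space
# normalisation, collections.Counter(first_str) == collections.Counter(second_str)
# gives the same answer; the defaultdict walk above just does it in one pass over
# both strings instead of building two separate frequency maps.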
| 318 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
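# Quick numerical illustration of the SiLU above (added; values shown to 4 d.p.).
if __name__ == "__main__":
    demo = np.array([-1.0, 0.0, 1.0] )
    print(sigmoid_linear_unit(demo ) )  # approximately [-0.2689  0.      0.7311]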
| 318 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[float] , iterations: int , ) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
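# Worked example (added): a strictly diagonally dominant 3x3 system whose exact
# solution is [1, 1, 1]; fifty Jacobi sweeps from a zero start converge to it
# to printed precision.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 1.0, 3.0]] )
    constant = np.array([[6.0], [8.0], [4.0]] )
    print(jacobi_iteration_method(coefficient , constant , [0.0, 0.0, 0.0] , 50 ) )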
| 318 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name ):
    '''simple docstring'''
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1_000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name , base_model=False ):
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def convert_state_dict(orig_state_dict , model , base_model=False ):
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
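# Example invocation (added; the script file name and paths are placeholders you must supply):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small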
| 318 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rag'''] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_rag'''] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
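# Effect of the lazy mapping above (added note): top-level attribute access stays cheap —
#
#     from transformers.models.rag import RagConfig, RagTokenizer      # no torch/tf import yet
#     from transformers.models.rag import RagSequenceForGeneration     # triggers the torch branch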
| 318 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
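# Added sanity check: the function returns one longest non-decreasing subsequence.
# Note the recursive search is exponential in the worst case; an O(n^2)
# dynamic-programming formulation is the usual alternative for long inputs.
if __name__ == "__main__":
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80] ) )  # a maximal chain such as [10, 22, 33, 41, 60, 80]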
| 318 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
    '''simple docstring'''
    assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
    '''simple docstring'''
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
        pass
def test_patch_submodule_missing_builtin() -> None:
    '''simple docstring'''
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    '''simple docstring'''
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
    '''simple docstring'''
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    '''simple docstring'''
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
        pass
| 318 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 318 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowercase :
def __init__(self , A , A=1_3 , A=1_0 , A=3 , A=2 , A=2 , A=2 , A=True , A=True , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=1_0 , A=0.02 , A=0.9 , A=None , ):
lowerCamelCase_ : List[str] = parent
lowerCamelCase_ : Any = batch_size
lowerCamelCase_ : int = image_size
lowerCamelCase_ : List[str] = num_channels
lowerCamelCase_ : Union[str, Any] = patch_size
lowerCamelCase_ : int = tubelet_size
lowerCamelCase_ : Any = num_frames
lowerCamelCase_ : Union[str, Any] = is_training
lowerCamelCase_ : List[Any] = use_labels
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : Optional[Any] = num_attention_heads
lowerCamelCase_ : str = intermediate_size
lowerCamelCase_ : List[Any] = hidden_act
lowerCamelCase_ : Tuple = hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase_ : str = type_sequence_label_size
lowerCamelCase_ : Tuple = initializer_range
lowerCamelCase_ : Tuple = mask_ratio
lowerCamelCase_ : Optional[int] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase_ : Dict = (image_size // patch_size) ** 2
lowerCamelCase_ : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase_ : Optional[int] = int(mask_ratio * self.seq_length )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : List[Any] = None
if self.use_labels:
lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Optional[Any] = VideoMAEModel(config=A )
model.to(A )
model.eval()
lowerCamelCase_ : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Optional[int] = VideoMAEForPreTraining(A )
model.to(A )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ : Tuple = torch.ones((self.num_masks,) )
lowerCamelCase_ : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase_ : List[str] = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase_ : Any = model(A , A )
# model only returns predictions for masked patches
lowerCamelCase_ : Optional[Any] = mask.sum().item()
lowerCamelCase_ : Union[str, Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = config_and_inputs
lowerCamelCase_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : int = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase : Dict = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Tuple = False
lowerCamelCase : Tuple = False
lowerCamelCase : Optional[Any] = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = VideoMAEModelTester(self )
lowerCamelCase_ : Dict = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def UpperCAmelCase__ (self , A , A , A=False ):
lowerCamelCase_ : Tuple = copy.deepcopy(A )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ : Tuple = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase_ : Union[str, Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase_ : str = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase_ : int = bool_masked_pos.to(A )
if return_labels:
if model_class in [
*get_values(A ),
]:
lowerCamelCase_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] = model_class(A )
lowerCamelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
@slow
def UpperCAmelCase__ (self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[Any] = VideoMAEModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase__ (self ):
if not self.has_attentions:
pass
else:
lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : int = True
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : List[str] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : Tuple = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase_ : Union[str, Any] = len(A )
# Check attention is always last and order is fine
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : str = True
lowerCamelCase_ : Dict = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : int = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 1 , len(A ) )
lowerCamelCase_ : str = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase__ (self ):
def check_hidden_states_output(A , A , A ):
lowerCamelCase_ : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Optional[int] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : List[Any] = outputs.hidden_states
lowerCamelCase_ : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A ) , A )
lowerCamelCase_ : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_, lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[int] = True
check_hidden_states_output(A , A , A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ (self ):
pass
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : int = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase_ : str = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ (self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
A )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : Any = prepare_video()
lowerCamelCase_ : Optional[int] = image_processor(A , return_tensors='''pt''' ).to(A )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Dict = model(**A )
# verify the logits
lowerCamelCase_ : Tuple = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase_ : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(A )
lowerCamelCase_ : Tuple = self.default_image_processor
lowerCamelCase_ : List[Any] = prepare_video()
lowerCamelCase_ : Union[str, Any] = image_processor(A , return_tensors='''pt''' ).to(A )
# add boolean mask, indicating which patches to mask
lowerCamelCase_ : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase_ : int = torch.load(A )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Any = model(**A )
# verify the logits
lowerCamelCase_ : Optional[int] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCamelCase_ : Dict = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=A )
self.assertEqual(outputs.logits.shape , A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase_ : Any = torch.tensor([0.51_42] , device=A )
self.assertTrue(torch.allclose(outputs.loss , A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=A ).to(
A )
with torch.no_grad():
lowerCamelCase_ : List[Any] = model(**A )
lowerCamelCase_ : int = torch.tensor([0.64_69] , device=A )
self.assertTrue(torch.allclose(outputs.loss , A , atol=1E-4 ) )
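# Standalone sketch of the bool_masked_pos construction used by the pretraining tests
# above, with illustrative sizes: every video in a batch must mask the same number of
# patches, so one 1-D mask is built and expanded across the batch dimension.
import torch

image_size, patch_size, num_frames, tubelet_size = 32, 2, 2, 2
mask_ratio, batch_size = 0.9, 3
num_patches_per_frame = (image_size // patch_size) ** 2
seq_length = (num_frames // tubelet_size) * num_patches_per_frame
num_masks = int(mask_ratio * seq_length)
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)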
| 318 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
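# Hedged sketch of the `.update()` contract exercised above, on a hypothetical
# stand-in class (not the real GenerationConfig): matching kwargs are applied as
# attributes, the rest are returned, and the caller's dict is never mutated.
class ToyConfig:
    def __init__(self):
        self.max_new_tokens = None
        self.temperature = 1.0

    def update(self, **kwargs):
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                unused[key] = value
        return unused


toy = ToyConfig()
leftover = toy.update(max_new_tokens=1024, foo="bar")
assert toy.max_new_tokens == 1024 and leftover == {"foo": "bar"}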
| 318 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
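# Self-contained sketch of the adapter pattern defined above, reusing the torch/nn
# imports from this file (sizes illustrative): a frozen base linear plus a trainable
# low-rank bottleneck, summed on the forward pass; zero-initialising the second
# projection makes the correction start at zero.
_base = nn.Linear(16, 16)
for _p in _base.parameters():
    _p.requires_grad_(False)
_adapter = nn.Sequential(nn.Linear(16, 4, bias=False), nn.Linear(4, 16, bias=False))
nn.init.zeros_(_adapter[1].weight)
_x = torch.randn(2, 16)
_out = _base(_x) + _adapter(_x)  # only the adapter contributes gradients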
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside the setUp function
# We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b7 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries casting with `.float()`
self.model_abit.float()
with self.assertRaises(A ):
# Tries casting with `.half()`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
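# Hedged usage sketch of the 4-bit loading path these tests exercise; it needs a CUDA
# GPU plus the `bitsandbytes` and `accelerate` packages, so it is left as a comment:
#
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
# )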
| 318 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "megatron-bert"
def __init__(self , A=2_9_0_5_6 , A=1_0_2_4 , A=2_4 , A=1_6 , A=4_0_9_6 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=True , **A , ):
super().__init__(pad_token_id=A , **A )
lowerCamelCase_ : List[Any] = vocab_size
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Dict = num_attention_heads
lowerCamelCase_ : List[str] = hidden_act
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : List[str] = hidden_dropout_prob
lowerCamelCase_ : int = attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] = max_position_embeddings
lowerCamelCase_ : Union[str, Any] = type_vocab_size
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : int = layer_norm_eps
lowerCamelCase_ : Optional[int] = position_embedding_type
lowerCamelCase_ : Dict = use_cache
| 318 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
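# Self-contained sketch of the `[CLS] A [SEP] B [SEP]` bookkeeping the three methods
# above implement, using illustrative integer ids rather than a real vocabulary.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8, 9], [4, 5]
pair_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
special_tokens_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
assert len(pair_ids) == len(token_type_ids) == len(special_tokens_mask) == 8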
| 318 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : List[str] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "rwkv"
lowerCamelCase : Union[str, Any] = {"max_position_embeddings": "context_length"}
def __init__(self , A=5_0_2_7_7 , A=1_0_2_4 , A=4_0_9_6 , A=3_2 , A=None , A=None , A=1E-5 , A=0 , A=0 , A=6 , A=False , A=True , **A , ):
lowerCamelCase_ : Optional[Any] = vocab_size
lowerCamelCase_ : Optional[int] = context_length
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : Tuple = num_hidden_layers
lowerCamelCase_ : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase_ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase_ : Optional[int] = layer_norm_epsilon
lowerCamelCase_ : int = rescale_every
lowerCamelCase_ : str = use_cache
lowerCamelCase_ : Optional[int] = bos_token_id
lowerCamelCase_ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
| 318 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
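# Hedged, self-contained sketch of the processor pattern these tests exercise: one
# object routes text to a tokenizer and images to an image processor, merges their
# dict outputs, and raises when neither input is given. `ToyProcessor` and the two
# lambdas below are toy stand-ins, not the real ChineseCLIP classes.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        out = {}
        if text is not None:
            out.update(self.tokenizer(text))
        if images is not None:
            out.update(self.image_processor(images))
        return out


merged = ToyProcessor(lambda t: {"input_ids": [1, 2]}, lambda i: {"pixel_values": [0.0]})(
    text="hi", images=[None]
)
assert set(merged) == {"input_ids", "pixel_values"}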
| 318 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase , _lowercase ) -> list[int]:
'''simple docstring'''
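    # note: this two-pointer scan assumes `nums` is already sorted in ascending order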
lowerCamelCase_ : str = 0
lowerCamelCase_ : Union[str, Any] = len(_lowercase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCamelCase_ : Any = i + 1
else:
lowerCamelCase_ : Any = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 318 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
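# Minimal numeric check of the right-shift above, reusing this file's jnp import
# (illustrative ids; 0 is assumed to stand in for both the decoder start token and
# the pad id, and -100 for the ignore index).
_example = jnp.array([[5, -100, 6]])
_shifted = jnp.zeros_like(_example).at[:, 1:].set(_example[:, :-1]).at[:, 0].set(0)
_shifted = jnp.where(_shifted == -100, 0, _shifted)
assert _shifted.tolist() == [[0, 5, 0]]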
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __lowercase ( _lowercase ):
def __init__(self , A=0.01 , A=1_0_0_0 ):
lowerCamelCase_ : Tuple = p_stop
lowerCamelCase_ : List[Any] = max_length
def __iter__(self ):
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : List[str] = False
while not stop and count < self.max_length:
yield count
count += 1
lowerCamelCase_ : List[str] = random.random() < self.p_stop
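# Self-contained sketch of the random-length stream modelled by the class above: each
# element is emitted, then iteration stops with probability p_stop, capped at
# max_length (the generator name and the seeding are illustrative).
import random as _random


def _random_stream(p_stop=0.01, max_length=1000, seed=0):
    rng = _random.Random(seed)
    count = 0
    while count < max_length:
        yield count
        count += 1
        if rng.random() < p_stop:
            break


assert 1 <= len(list(_random_stream(p_stop=0.3, seed=42))) <= 1000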
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self , A , A , A=False , A=True ):
lowerCamelCase_ : Dict = [
BatchSamplerShard(A , 2 , A , split_batches=A , even_batches=A )
for i in range(2 )
]
lowerCamelCase_ : Dict = [list(A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A ) for shard in batch_sampler_shards] , [len(A ) for e in expected] )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
# Check the shards when the dataset is a round multiple of total batch size.
lowerCamelCase_ : Dict = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(A , A )
lowerCamelCase_ : str = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCamelCase_ : str = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
lowerCamelCase_ : List[Any] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCamelCase_ : List[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
lowerCamelCase_ : Any = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size and the leftover is not
# a multiple of num_processes.
lowerCamelCase_ : List[str] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
lowerCamelCase_ : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
lowerCamelCase_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
lowerCamelCase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A )
def UpperCAmelCase__ (self ):
# Check the shards when the dataset is a round multiple of batch size.
lowerCamelCase_ : Any = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowerCamelCase_ : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCamelCase_ : Union[str, Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowerCamelCase_ : Tuple = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCamelCase_ : Tuple = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowerCamelCase_ : str = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowerCamelCase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowerCamelCase_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def UpperCAmelCase__ (self ):
# Check the shards when the dataset is a round multiple of total batch size.
lowerCamelCase_ : Optional[int] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowerCamelCase_ : str = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCamelCase_ : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowerCamelCase_ : Optional[int] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCamelCase_ : Any = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowerCamelCase_ : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size and the leftover is not
# a multiple of num_processes.
lowerCamelCase_ : Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowerCamelCase_ : Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowerCamelCase_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowerCamelCase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowerCamelCase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def UpperCAmelCase__ (self ):
# Check the shards when the dataset is a round multiple of batch size.
lowerCamelCase_ : Optional[int] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowerCamelCase_ : Dict = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCamelCase_ : Any = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowerCamelCase_ : int = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCamelCase_ : List[str] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowerCamelCase_ : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowerCamelCase_ : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowerCamelCase_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Any = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
lowerCamelCase_ : Any = [BatchSamplerShard(A , 2 , A , even_batches=A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def UpperCAmelCase__ (self , A , A , A , A=False , A=2 , A=False ):
random.seed(A )
lowerCamelCase_ : Dict = list(A )
lowerCamelCase_ : Optional[Any] = [
IterableDatasetShard(
A , batch_size=A , drop_last=A , num_processes=A , process_index=A , split_batches=A , )
for i in range(A )
]
lowerCamelCase_ : Tuple = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A )
iterable_dataset_lists.append(list(A ) )
lowerCamelCase_ : int = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
lowerCamelCase_ : List[str] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(len(A ) % shard_batch_size == 0 )
lowerCamelCase_ : List[Any] = []
for idx in range(0 , len(A ) , A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A ) < len(A ):
reference += reference
self.assertListEqual(A , reference[: len(A )] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = 4_2
lowerCamelCase_ : Dict = RandomIterableDataset()
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
# Edge case with a very small dataset
lowerCamelCase_ : Union[str, Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=A )
lowerCamelCase_ : Any = SkipBatchSampler(A , 2 )
self.assertListEqual(list(A ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = DataLoader(list(range(1_6 ) ) , batch_size=4 )
lowerCamelCase_ : Dict = skip_first_batches(A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def UpperCAmelCase__ (self ):
Accelerator()
lowerCamelCase_ : Union[str, Any] = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 318 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure that the PNDM scheduler skips the PRK steps
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure that the PNDM scheduler skips the PRK steps
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so compare the max absolute error here
assert np.abs(expected_image - image ).max() < 1E-2
| 318 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''ZinengTang/tvlt-base'''
lowerCamelCase_ : Union[str, Any] = tempfile.mkdtemp()
def UpperCAmelCase__ (self , **A ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **A )
def UpperCAmelCase__ (self , **A ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = self.get_feature_extractor()
lowerCamelCase_ : Union[str, Any] = TvltProcessor(image_processor=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , A )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_feature_extractor()
lowerCamelCase_ : Union[str, Any] = TvltProcessor(image_processor=A , feature_extractor=A )
lowerCamelCase_ : Union[str, Any] = np.ones([1_2_0_0_0] )
lowerCamelCase_ : str = feature_extractor(A , return_tensors='''np''' )
lowerCamelCase_ : int = processor(audio=A , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_feature_extractor()
lowerCamelCase_ : List[str] = TvltProcessor(image_processor=A , feature_extractor=A )
lowerCamelCase_ : str = np.ones([3, 2_2_4, 2_2_4] )
lowerCamelCase_ : Optional[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : str = processor(images=A , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_feature_extractor()
lowerCamelCase_ : str = TvltProcessor(image_processor=A , feature_extractor=A )
lowerCamelCase_ : Optional[int] = np.ones([1_2_0_0_0] )
lowerCamelCase_ : int = np.ones([3, 2_2_4, 2_2_4] )
lowerCamelCase_ : Optional[Any] = processor(audio=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_feature_extractor()
lowerCamelCase_ : List[Any] = TvltProcessor(image_processor=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 318 |
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
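# Project Euler #43: for a 0-9 pandigital number d1..d10, each 3-digit substring
# d2d3d4 ... d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13 and 17 respectively.
# The first three checks use cheap digit rules; the loop handles the remaining primes.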
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
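# Sum every 0-9 pandigital number (generated as a digit permutation) that has the
# substring-divisibility property above.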
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 318 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase_ ( _lowercase ) -> Optional[Any]:
'''simple docstring'''
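# Decorator: on accelerate >= 0.17.0, run the model's accelerate hook (if any)
# before the wrapped forward; on older versions, return the method unchanged.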
if not is_accelerate_available():
return method
lowerCamelCase_ : List[Any] = version.parse(accelerate.__version__ ).base_version
if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *_lowercase , **_lowercase ):
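# Run the pre-forward hook (e.g. to move offloaded weights onto the execution
# device) before dispatching to the wrapped method.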
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *_lowercase , **_lowercase )
return wrapper
| 318 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
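# Standard BFS: expand nodes in FIFO order until the target position is dequeued.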
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
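# Return the valid neighbor nodes: inside the grid and not blocked by an obstacle.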
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
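# Follow parent pointers back to the start, then reverse to get the start-to-goal path.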
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
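# Advance the forward and backward searches one node at a time; stop when the two
# frontiers meet at the same cell.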
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
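# Concatenate the forward path with the reversed backward path, dropping the
# duplicated meeting node.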
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318 |
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] gives the number of nodes in the input layer.
# The first hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
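# Forward pass: push the input through both hidden layers and the output layer,
# applying a sigmoid activation at every step.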
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
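# Backward pass: chain-rule gradients of the squared error with respect to each
# weight matrix, computed from the output layer back to the input layer.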
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
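# Each training iteration runs a forward pass and then updates all weight
# matrices via back-propagation.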
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
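# Logistic activation: maps any real value into the open interval (0, 1).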
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
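# Derivative of the sigmoid, expressed in terms of the sigmoid's output value.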
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Dict = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 318 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
lowerCamelCase_ : Dict = -1
lowerCamelCase_ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
lowerCamelCase_ : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
lowerCamelCase_ : str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase_ : List[Any] = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
lowerCamelCase_ : Optional[Any] = -1
lowerCamelCase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
lowerCamelCase_ : Tuple = model.generate(A , max_new_tokens=1_0 , do_sample=A )
lowerCamelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
lowerCamelCase_ : Union[str, Any] = TextIteratorStreamer(A )
lowerCamelCase_ : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase_ : List[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
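# Consume the stream on the main thread while generation runs in the background thread.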
lowerCamelCase_ : Dict = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
lowerCamelCase_ : List[Any] = -1
lowerCamelCase_ : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
lowerCamelCase_ : Optional[int] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
lowerCamelCase_ : List[Any] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase_ : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase_ : Optional[int] = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase_ : Optional[Any] = cs.out[:-1]
self.assertEqual(A , A )
def UpperCAmelCase__ (self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
lowerCamelCase_ : Tuple = -1
lowerCamelCase_ : str = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase_ : List[str] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase_ : Union[str, Any] = cs.out[:-1] # Remove the final "\n"
lowerCamelCase_ : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
lowerCamelCase_ : str = -1
lowerCamelCase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
lowerCamelCase_ : int = TextIteratorStreamer(A , timeout=0.0_01 )
lowerCamelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase_ : int = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
lowerCamelCase_ : Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 318 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
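# NOTE: the hard-coded bound of 10 below matches the length of the example list built in main().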
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
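# For reference, a minimal single-process sketch of the same odd-even transposition
# idea, without the pipes and locks used above (names here are illustrative):
def odd_even_transposition_serial(values: list) -> list:
    data = list(values)
    n = len(data)
    for step in range(n):
        # even steps compare pairs starting at index 0, odd steps at index 1
        start = 0 if step % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
    return data
assert odd_even_transposition_serial([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]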
| 318 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowercase_ ( _lowercase , _lowercase , _lowercase=1e-12 ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : List[str] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_lowercase , axis=1 ) , a_min=_lowercase ) ).T
lowerCamelCase_ : int = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_lowercase , axis=1 ) , a_min=_lowercase ) ).T
return jnp.matmul(_lowercase , norm_emb_a.T )
class __lowercase ( nn.Module ):
lowerCamelCase : CLIPConfig
lowerCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = FlaxCLIPVisionModule(self.config.vision_config )
lowerCamelCase_ : Optional[int] = nn.Dense(self.config.projection_dim , use_bias=A , dtype=self.dtype )
lowerCamelCase_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
lowerCamelCase_ : Union[str, Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowerCamelCase_ : str = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (1_7,) )
lowerCamelCase_ : List[Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__(self , A ):
lowerCamelCase_ : List[str] = self.vision_model(A )[1]
lowerCamelCase_ : Any = self.visual_projection(A )
lowerCamelCase_ : List[str] = jax_cosine_distance(A , self.special_care_embeds )
lowerCamelCase_ : Optional[int] = jax_cosine_distance(A , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
lowerCamelCase_ : Any = 0.0
lowerCamelCase_ : List[Any] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCamelCase_ : Optional[Any] = jnp.round(A , 3 )
lowerCamelCase_ : List[str] = jnp.any(special_scores > 0 , axis=1 , keepdims=A )
# Use a lower threshold if an image has any special care concept
lowerCamelCase_ : Optional[int] = is_special_care * 0.01
lowerCamelCase_ : Optional[Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCamelCase_ : Optional[int] = jnp.round(A , 3 )
lowerCamelCase_ : Tuple = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = CLIPConfig
lowerCamelCase : Union[str, Any] = "clip_input"
lowerCamelCase : str = FlaxStableDiffusionSafetyCheckerModule
def __init__(self , A , A = None , A = 0 , A = jnp.floataa , A = True , **A , ):
if input_shape is None:
lowerCamelCase_ : Dict = (1, 2_2_4, 2_2_4, 3)
lowerCamelCase_ : Tuple = self.module_class(config=A , dtype=A , **A )
super().__init__(A , A , input_shape=A , seed=A , dtype=A , _do_init=_do_init )
def UpperCAmelCase__ (self , A , A , A = None ):
# init input tensor
lowerCamelCase_ : str = jax.random.normal(A , A )
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = jax.random.split(A )
lowerCamelCase_ : str = {'''params''': params_rng, '''dropout''': dropout_rng}
lowerCamelCase_ : Optional[Any] = self.module.init(A , A )['''params''']
return random_params
def __call__(self , A , A = None , ):
lowerCamelCase_ : Optional[int] = jnp.transpose(A , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(A , dtype=jnp.floataa ) , rngs={} , )
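# The filter above reduces to cosine similarities between row-normalized embeddings,
# thresholded per concept; a small self-contained sketch of that score computation
# (toy shapes, illustrative names):
import jax.numpy as jnp
def cosine_similarity_matrix(emb_a, emb_b, eps=1e-12):
    # normalize rows, then a plain matmul yields pairwise cosine similarities
    a = emb_a / jnp.clip(jnp.linalg.norm(emb_a, axis=1, keepdims=True), eps)
    b = emb_b / jnp.clip(jnp.linalg.norm(emb_b, axis=1, keepdims=True), eps)
    return a @ b.T
image_emb = jnp.ones((2, 4))  # stand-in for projected image embeddings
concept_emb = jnp.eye(4)      # stand-in for concept embeddings
scores = cosine_similarity_matrix(image_emb, concept_emb)  # shape (2, 4)
assert scores.shape == (2, 4)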
| 318 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
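# The conversion above is validated by comparing model outputs elementwise; the
# core of that check, isolated on toy tensors:
import torch
ours = torch.tensor([1.0, 2.0, 3.0])
theirs = ours + 1e-5  # tiny numerical drift between implementations is expected
max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
assert max_abs_diff < 1e-3 and torch.allclose(ours, theirs, atol=1e-3)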
| 318 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowercase_ ( _lowercase ) -> float:
'''simple docstring'''
return np.dot(_lowercase , _lowercase )
class __lowercase :
    def __init__(self , *, A = np.inf , A = "linear" , A = 0.0 , ):
lowerCamelCase_ : Optional[int] = regularization
lowerCamelCase_ : List[str] = gamma
if kernel == "linear":
lowerCamelCase_ : Any = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
lowerCamelCase_ : Optional[Any] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase_ : int = F"""Unknown kernel: {kernel}"""
raise ValueError(A )
def UpperCAmelCase__ (self , A , A ):
return np.dot(A , A )
def UpperCAmelCase__ (self , A , A ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : str = observations
lowerCamelCase_ : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowerCamelCase_), ) : Union[str, Any] = np.shape(A )
def to_minimize(A ) -> float:
lowerCamelCase_ : List[Any] = 0
((lowerCamelCase_), ) : Union[str, Any] = np.shape(A )
for i in range(A ):
for j in range(A ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(A )
lowerCamelCase_ : Dict = LinearConstraint(A , 0 , 0 )
lowerCamelCase_ : Dict = Bounds(0 , self.regularization )
lowerCamelCase_ : int = minimize(
A , np.ones(A ) , bounds=A , constraints=[ly_contraint] ).x
lowerCamelCase_ : List[Any] = l_star
# calculating mean offset of separation plane to points
lowerCamelCase_ : Union[str, Any] = 0
for i in range(A ):
for j in range(A ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
lowerCamelCase_ : int = s / n
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , A )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
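# The rbf kernel used above is exp(-gamma * ||x - y||**2); a quick numpy sketch
# that builds the full kernel matrix for a set of points (toy data, illustrative
# names):
import numpy as np
def rbf_kernel_matrix(xs: np.ndarray, gamma: float) -> np.ndarray:
    # squared pairwise distances via ||x - y||**2 = x.x + y.y - 2 * x.y
    sq_norms = np.sum(xs * xs, axis=1)
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2.0 * xs @ xs.T
    return np.exp(-gamma * np.maximum(sq_dists, 0.0))
points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
kernel = rbf_kernel_matrix(points, gamma=0.5)
assert np.allclose(np.diag(kernel), 1.0)  # every point is maximally similar to itself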
| 318 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
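# The attribute_map above lets callers read canonical names (hidden_size) while the
# config stores CTRL-specific ones (n_embd); a standalone sketch of that aliasing
# pattern (illustrative class, not the transformers implementation):
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}
    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
config = AliasedConfig()
assert config.hidden_size == config.n_embd == 1280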
| 318 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "microsoft/speecht5_tts"
lowerCamelCase : Dict = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
lowerCamelCase : Union[str, Any] = "text_reader"
lowerCamelCase : Dict = SpeechTaProcessor
lowerCamelCase : Optional[int] = SpeechTaForTextToSpeech
lowerCamelCase : Tuple = SpeechTaHifiGan
lowerCamelCase : List[str] = ["text"]
lowerCamelCase : List[str] = ["audio"]
def UpperCAmelCase__ (self ):
if self.post_processor is None:
lowerCamelCase_ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : List[str] = self.pre_processor(text=A , return_tensors='''pt''' , truncation=A )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
lowerCamelCase_ : Tuple = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
lowerCamelCase_ : Optional[int] = torch.tensor(embeddings_dataset[7_3_0_5]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase__ (self , A ):
with torch.no_grad():
return self.model.generate_speech(**A )
def UpperCAmelCase__ (self , A ):
with torch.no_grad():
return self.post_processor(A ).cpu().detach()
| 318 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
        lowerCamelCase_ : int = [''' '''.join(m ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = XLNetTokenizer
lowerCamelCase : Optional[int] = XLNetTokenizerFast
lowerCamelCase : str = True
lowerCamelCase : Dict = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Union[str, Any] = XLNetTokenizer(A , keep_accents=A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''<s>'''
lowerCamelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(A ) , 1_0_0_6 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = XLNetTokenizer(A , keep_accents=A )
lowerCamelCase_ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
lowerCamelCase_ : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase_ : List[str] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
lowerCamelCase_ : Dict = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = XLNetTokenizer(A , do_lower_case=A )
lowerCamelCase_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = XLNetTokenizer(A , do_lower_case=A )
lowerCamelCase_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
lowerCamelCase_ : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=A )
lowerCamelCase_ : Dict = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A )
lowerCamelCase_ : Any = tokenizer.build_inputs_with_special_tokens(A )
lowerCamelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : Tuple = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
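# SPIECE_UNDERLINE ('▁') marks word starts in SentencePiece output, which is why the
# expected token lists above carry it; a tiny detokenization sketch (illustrative):
SPIECE_UNDERLINE = "▁"
def detokenize(pieces: list) -> str:
    # joining the pieces and turning '▁' back into spaces recovers the text
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()
assert detokenize(["▁This", "▁is", "▁a", "▁t", "est"]) == "This is a test"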
| 318 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
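# feature_extractor_class_from_name above resolves classes lazily with importlib;
# the same lookup pattern in isolation (stdlib module, illustrative names):
import importlib
def class_from_name(module_name: str, class_name: str):
    # import the module on demand, then look the class up by name
    module = importlib.import_module(module_name)
    return getattr(module, class_name, None)
ordered_dict_cls = class_from_name("collections", "OrderedDict")
assert ordered_dict_cls is not None and ordered_dict_cls.__name__ == "OrderedDict"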
| 318 | 1 |
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(_lowercase , _lowercase ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
lowerCamelCase_ : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCamelCase_ : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
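# A quick worked example of the amortization formula above (numbers are
# illustrative): 100_000 borrowed at 10% per annum over 2 years gives a monthly
# payment of roughly 4614.49.
principal, rate_per_annum, years_to_repay = 100_000, 0.10, 2
rate_per_month = rate_per_annum / 12
number_of_payments = years_to_repay * 12
payment = (
    principal
    * rate_per_month
    * (1 + rate_per_month) ** number_of_payments
    / ((1 + rate_per_month) ** number_of_payments - 1)
)
assert 4614 < payment < 4615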
| 318 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
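# The multiple-choice preprocessing above flattens the 4 endings of every example
# before a single tokenizer call and regroups them afterwards; a tiny sketch of
# that reshape on toy data:
from itertools import chain
examples = [["q1 a", "q1 b", "q1 c", "q1 d"], ["q2 a", "q2 b", "q2 c", "q2 d"]]
flat = list(chain(*examples))  # 8 strings tokenized in one batch
regrouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
assert regrouped == examples  # round-trips back to (batch, num_choices)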
| 318 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowercase_ ( _lowercase , _lowercase , _lowercase = "x" , _lowercase = 10**-10 , _lowercase = 1 , ) -> complex:
'''simple docstring'''
lowerCamelCase_ : List[str] = symbols(_lowercase )
lowerCamelCase_ : Tuple = lambdify(_lowercase , _lowercase )
lowerCamelCase_ : str = lambdify(_lowercase , diff(_lowercase , _lowercase ) )
lowerCamelCase_ : List[Any] = starting_point
while True:
if diff_function(_lowercase ) != 0:
lowerCamelCase_ : Union[str, Any] = prev_guess - multiplicity * func(_lowercase ) / diff_function(
_lowercase )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowerCamelCase_ : Dict = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 - 5", 0.4 + 5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
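# The same iteration works without sympy whenever the derivative is available in
# closed form; a minimal float-only sketch (illustrative) that solves x**2 - a = 0:
def newton_sqrt(a: float, guess: float = 1.0, tol: float = 1e-10) -> float:
    x = guess
    while True:
        next_guess = x - (x * x - a) / (2 * x)  # x - f(x) / f'(x)
        if abs(next_guess - x) < tol:
            return next_guess
        x = next_guess
assert abs(newton_sqrt(2.0) - 2.0 ** 0.5) < 1e-9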
| 318 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
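
    # NOTE: list.pop(0) above makes each dequeue O(n); for larger grids a
    # collections.deque (append / popleft) would make the queue operations O(1).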
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Retarget each search at the other search's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 318 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
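
# pass_and_relaxation relaxes v's out-edges for one search direction and, when a
# neighbour has already been settled by the opposite search, tries to improve the
# best known forward-distance + edge + backward-distance meeting value.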
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path, -1 if no path exists
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
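
# Example with the sample graphs below:
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  ->  3   (E -> G -> F)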
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Mathematical sigmoid function, applied element-wise:
    sigmoid(x) = 1 / (1 + e^(-x))
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    SiLU (a.k.a. swish) activation: x * sigmoid(x).
    """
    return vector * sigmoid(vector)
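
# Quick sanity check of the closed forms above:
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])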
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 1 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase_ ( _lowercase , _lowercase="shi-labs/oneformer_demo" ) -> Any:
'''simple docstring'''
with open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) as f:
lowerCamelCase_ : Optional[Any] = json.load(_lowercase )
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : Union[str, Any] = []
for key, info in class_info.items():
lowerCamelCase_ : List[Any] = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(_lowercase ) )
lowerCamelCase_ : Tuple = thing_ids
lowerCamelCase_ : List[Any] = class_names
return metadata
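
# The returned dict maps each class id (string key) to its name and additionally
# stores "thing_ids" (ids of countable "thing" classes) and "class_names" --
# the metadata layout OneFormerImageProcessor expects.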
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for the given images, following the
        shortest-edge resizing logic of OneFormerImageProcessor.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
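
    # Example: with {"shortest_edge": 32}, a PIL image of size (w=40, h=30)
    # resizes to expected_height = 32 and expected_width = int(32 * 40 / 30) = 42.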
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''ignore_index''' ) )
self.assertTrue(hasattr(A , '''class_info_file''' ) )
self.assertTrue(hasattr(A , '''num_text''' ) )
self.assertTrue(hasattr(A , '''repo_path''' ) )
self.assertTrue(hasattr(A , '''metadata''' ) )
self.assertTrue(hasattr(A , '''do_reduce_labels''' ) )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
# Initialize image_processor
lowerCamelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Tuple = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : List[str] = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processor
lowerCamelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : str = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Union[str, Any] = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processor
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Optional[int] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : int = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Dict = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self , A=False , A=False , A="np" ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowerCamelCase_ : str = self.image_processing_tester.num_labels
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : Union[str, Any] = None
lowerCamelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
if with_segmentation_maps:
lowerCamelCase_ : Any = num_labels
if is_instance_map:
lowerCamelCase_ : Union[str, Any] = list(range(A ) ) * 2
lowerCamelCase_ : int = dict(enumerate(A ) )
lowerCamelCase_ : Optional[int] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowerCamelCase_ : List[str] = [Image.fromarray(A ) for annotation in annotations]
lowerCamelCase_ : Any = image_processor(
A , ['''semantic'''] * len(A ) , A , return_tensors='''pt''' , instance_id_to_semantic_id=A , pad_and_return_pixel_mask=A , )
return inputs
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
def common(A=False , A=None ):
lowerCamelCase_ : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=A , is_instance_map=A , segmentation_type=A )
lowerCamelCase_ : Union[str, Any] = inputs['''mask_labels''']
lowerCamelCase_ : str = inputs['''class_labels''']
lowerCamelCase_ : Union[str, Any] = inputs['''pixel_values''']
lowerCamelCase_ : str = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(A , A , A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=A )
common(is_instance_map=A , segmentation_type='''pil''' )
common(is_instance_map=A , segmentation_type='''pil''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = np.zeros((2_0, 5_0) )
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : Union[str, Any] = binary_mask_to_rle(A )
self.assertEqual(len(A ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
    def test_post_process_semantic_segmentation(self):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 318 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
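
# Example: the checkpoint key "conv_1.block.conv.weight" is rewritten step by
# step to "mobilevit.conv_stem.convolution.weight".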
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
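
# The fused "qkv" projection of shape (3 * dim, in_features) is split row-wise
# into the separate query/key/value weights the HF attention module expects.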
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 318 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
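
    # _LazyModule defers the heavy torch/TF imports until an attribute of the
    # module is actually accessed, which keeps the initial import fast.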
| 318 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Some examples
    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([1, 1, 1])
    [1, 1, 1]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
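
# Note: this recursive formulation runs in exponential time in the worst case;
# the classic patience-sorting approach computes an LIS in O(n log n).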
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 318 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 318 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: returns all primes up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
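
# The sieve runs in O(n log log n) time and O(n) memory: each composite is
# crossed off starting from p*p, since smaller multiples were already marked.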
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 0 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of the authenticated user using the requests module.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
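
# Example (needs a valid personal access token in the USER_TOKEN env var):
#   fetch_github_info(USER_TOKEN)["login"]  ->  the authenticated username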
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
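
# Minimal usage sketch (assuming the standard PyTorchBenchmark entry point that
# consumes these arguments):
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   PyTorchBenchmark(args).run()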
| 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a small LoRA-style adapter (for testing only)."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
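
    # The down-projection gets a small normal init and the up-projection is
    # zero-initialised, so the adapter initially contributes nothing and the
    # wrapped module's behaviour is unchanged at the start of training.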
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name ) -> FocalNetConfig:
    """simple docstring"""
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , idalabel=idalabel , labelaid=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key(name ) -> str:
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowercase__ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowercase__ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowercase__ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowercase__ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
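    # Example invocation (illustrative only; the script file name is
    # hypothetical, and --model_name must be a key of `model_name_to_url`):
    #   python convert_focalnet_to_hf_format.py \
    #       --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny \
    #       --push_to_hub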
| 2 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def UpperCAmelCase__ (self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def UpperCAmelCase__ (self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def UpperCAmelCase__ (self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def UpperCAmelCase__ (self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
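# A quick illustration of the layouts the three sequence helpers above produce,
# given single-sequence ids [i1, i2] and pair ids [j1, j2]:
#   inputs with special tokens:  [CLS] i1 i2 [SEP]   /  [CLS] i1 i2 [SEP] j1 j2 [SEP]
#   special-tokens mask:         [1, 0, 0, 1]        /  [1, 0, 0, 1, 0, 0, 1]
#   token type ids:              [0, 0, 0, 0]        /  [0, 0, 0, 0, 1, 1, 1]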
| 318 | 0 |
'''simple docstring'''
import sys
lowercase : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCAmelCase_ ( n = lowercase ):
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
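# A rough O(len(n)) alternative sketch (not part of the original solution):
# keep a running product over a window of nonzero digits, dividing out the
# digit that leaves the window and restarting after every '0'.
def lowerCAmelCase_linear( n = lowercase , window = 13 ):
    best, product, start = 0, 1, 0
    for i, ch in enumerate(n ):
        digit = int(ch )
        if digit == 0:
            product, start = 1, i + 1  # any window crossing a zero has product zero
            continue
        product *= digit
        if i - start + 1 > window:
            product //= int(n[start] )  # drop the digit leaving the window
            start += 1
        if i - start + 1 == window:
            best = max(best , product )
    return best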
if __name__ == "__main__":
    print(f'''{lowerCAmelCase_() = }''')
| 3 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def strabool( v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path , unet_config ):
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f'''down_blocks.{i}.attentions.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = f'''down_blocks.{i}.downsamplers.0'''
            old_prefix = f'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = f'''up_blocks.{i}.attentions.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
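    # Example invocation (illustrative; the checkpoint file name is hypothetical
    # but must contain one of the substrings matched above, e.g. "cd" and
    # "imagenet64"):
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./cd_imagenet64_l2 \
    #       --class_cond True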
| 4 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
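# Worked example of the shift above (assuming pad_token_id=0 and
# decoder_start_token_id=0): input_ids [[5, -100, 6]] becomes [[0, 5, -100]]
# after the right shift, and any remaining -100 label sentinel is then
# replaced by the pad token id, giving [[0, 5, 0]].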
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318 | 0 |
def apply_table( inp , table ) -> str:
    """simple docstring"""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ) -> str:
    """simple docstring"""
    return data[1:] + data[0]
def xor( a , b ) -> str:
    """simple docstring"""
    res = ''''''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s , data ) -> str:
    """simple docstring"""
    row = int('''0b''' + data[0] + data[-1] , 2 )
    col = int('''0b''' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function( expansion , s0 , s1 , key , message ) -> str:
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '''0''' * (2 - len(l )) + l  # noqa: E741
    r = '''0''' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
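    # Round-trip note: decryption replays the two Feistel rounds with the round
    # keys swapped (key2 first, then key1), so PT printed here equals the
    # original 8 bit message for any well-formed key/message pair.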
| 5 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker( role_name ) -> None:
    iam_client = botoa.client('''iam''' )
    trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(trust_policy , indent=2 ) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn( role_name ):
    iam_client = botoa.client('''iam''' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
__a = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , a__ , )
__a = None
if credentials_configuration == 0:
__a = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__a = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__a = _ask_field('''AWS Access Key ID: ''' )
__a = aws_access_key_id
__a = _ask_field('''AWS Secret Access Key: ''' )
__a = aws_secret_access_key
__a = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__a = aws_region
__a = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , a__ , )
if role_management == 0:
__a = _ask_field('''Enter your IAM role name: ''' )
else:
        iam_role_name = '''accelerate_sagemaker_execution_role'''
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
        _create_iam_role_for_sagemaker(iam_role_name )
__a = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_custom_docker_image:
__a = _ask_field('''Enter your Docker image: ''' , lambda a__ : str(a__ ).lower() )
__a = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_sagemaker_inputs_enabled:
__a = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda a__ : str(a__ ).lower() , )
__a = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
__a = None
if is_sagemaker_metrics_enabled:
__a = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda a__ : str(a__ ).lower() , )
__a = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__a = {}
__a = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__a = '''dynamo_'''
__a = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__a = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__a = _ask_options(
'''Which mode do you want to use?''' , a__ , lambda a__ : TORCH_DYNAMO_MODES[int(a__ )] , default='''default''' , )
__a = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
__a = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=a__ , error_message='''Please enter yes or no.''' , )
__a = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
__a = _ask_options(
a__ , a__ , lambda a__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__a = _ask_field(a__ , lambda a__ : str(a__ ).lower() , default='''ml.p3.2xlarge''' )
__a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__a = _ask_field(
'''How many machines do you want use? [1]: ''' , a__ , default=1 , )
__a = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=a__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=a__ , use_cpu=a__ , dynamo_config=a__ , eca_instance_type=a__ , profile=a__ , region=a__ , iam_role_name=a__ , mixed_precision=a__ , num_machines=a__ , sagemaker_inputs_file=a__ , sagemaker_metrics_file=a__ , )
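# Usage note (an assumption, not stated in this file): the questionnaire above
# is what `accelerate config` runs when the user picks the Amazon SageMaker
# compute environment, and the returned SageMakerConfig is then serialized to
# the default accelerate config file.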
| 6 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible( num ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( n = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int(''''''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    print(f'{lowercase_() = }')
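    # Worked check (the known example from the problem statement): 1406357289 is
    # 0-to-9 pandigital and passes every test above, so
    # is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) is True:
    # d4 = 6 is even, 0 + 6 + 3 = 9 is divisible by 3, d6 = 5, and
    # 357 = 7 * 51, 572 = 11 * 52, 728 = 13 * 56, 289 = 17 * 17.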
| 318 | 0 |
from ...configuration_utils import PretrainedConfig
lowercase_ = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 'tapas'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , type_vocab_sizes=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=6_4 , max_num_columns=3_2 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) -> None:
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
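        # Note: the isinstance check above normalizes JSON-style string keys,
        # e.g. {"0": "NONE", "1": "SUM"} -> {0: "NONE", 1: "SUM"}, so configs
        # loaded from disk behave the same as configs built in Python.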
| 7 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( PreTrainedTokenizer ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , _UpperCamelCase : str="</s>" , _UpperCamelCase : List[Any]="<unk>" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : Optional[int]=1_2_5 , _UpperCamelCase : Dict=None , **_UpperCamelCase : Tuple , ) ->None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
''' extra_ids tokens''' )
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = extra_ids
snake_case_ = 2**8 # utf is 8 bits
# define special tokens dict
snake_case_ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case_ = len(self.special_tokens_encoder )
snake_case_ = len(_UpperCamelCase )
for i, token in enumerate(_UpperCamelCase ):
snake_case_ = self.vocab_size + i - n
snake_case_ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case__( self : Optional[int] ) ->List[str]:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[int] ) ->List[int]:
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]:
snake_case_ = [chr(_UpperCamelCase ) for i in text.encode('''utf-8''' )]
return tokens
def snake_case__( self : List[str] , _UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
if token in self.special_tokens_encoder:
snake_case_ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case_ = self.added_tokens_encoder[token]
elif len(_UpperCamelCase ) != 1:
snake_case_ = self.unk_token_id
else:
snake_case_ = ord(_UpperCamelCase ) + self._num_special_tokens
return token_id
def snake_case__( self : int , _UpperCamelCase : Tuple ) ->Union[str, Any]:
if index in self.special_tokens_decoder:
snake_case_ = self.special_tokens_decoder[index]
else:
snake_case_ = chr(index - self._num_special_tokens )
return token
def snake_case__( self : int , _UpperCamelCase : List[Any] ) ->str:
snake_case_ = B''''''
for token in tokens:
if token in self.special_tokens_decoder:
snake_case_ = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.added_tokens_decoder:
snake_case_ = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.special_tokens_encoder:
snake_case_ = token.encode('''utf-8''' )
elif token in self.added_tokens_encoder:
snake_case_ = token.encode('''utf-8''' )
else:
snake_case_ = bytes([ord(_UpperCamelCase )] )
bstring += tok_string
snake_case_ = bstring.decode('''utf-8''' , errors='''ignore''' )
return string
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
return ()
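# --- Hedged usage sketch (standalone, mirrors the byte-level scheme above) ---
# Assumption: ids 0..2 are pad/eos/unk, so raw utf-8 byte values are offset by 3.
def byte_encode(text, num_special=3):
    return [b + num_special for b in text.encode("utf-8")]


def byte_decode(ids, num_special=3):
    data = bytes(i - num_special for i in ids if i >= num_special)
    return data.decode("utf-8", errors="ignore")


_ids = byte_encode("héllo")
assert byte_decode(_ids) == "héllo"  # lossless round-trip, no vocab file needed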
| 8 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
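# --- Hedged standalone sketch (not the real transformers implementation) ---
# The contract the update() test above relies on: known attributes are set on
# the config, unknown kwargs are returned untouched for the caller to handle.
class MiniConfig:
    def __init__(self):
        self.max_new_tokens = 20

    def update(self, **kwargs):
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)  # valid attribute: apply in place
            else:
                unused[key] = value  # unknown key: hand back to the caller
        return unused


_cfg = MiniConfig()
assert _cfg.update(max_new_tokens=1024, foo="bar") == {"foo": "bar"}
assert _cfg.max_new_tokens == 1024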
| 318 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__lowerCAmelCase : Dict ={
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__lowerCAmelCase : Optional[int] ={'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
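# --- Hedged sanity check (not part of the original file) ---
# bytes_to_unicode should be a bijection over all 256 byte values, so BPE can
# operate on "visible" strings with no information loss.
_byte_map = bytes_to_unicode()
assert len(_byte_map) == 256 and len(set(_byte_map.values())) == 256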
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any]="replace" , lowerCAmelCase__ :List[str]="<s>" , lowerCAmelCase__ :Union[str, Any]="</s>" , lowerCAmelCase__ :Optional[Any]="</s>" , lowerCAmelCase__ :Optional[Any]="<s>" , lowerCAmelCase__ :str="<unk>" , lowerCAmelCase__ :int="<pad>" , lowerCAmelCase__ :Tuple="<mask>" , lowerCAmelCase__ :int=False , **lowerCAmelCase__ :int , ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
__SCREAMING_SNAKE_CASE : str = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : int = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as merges_handle:
__SCREAMING_SNAKE_CASE : Tuple = merges_handle.read().split('''\n''' )[1:-1]
__SCREAMING_SNAKE_CASE : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : Any = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __magic_name__( self :Optional[int] ) -> Tuple:
return len(self.encoder )
def __magic_name__( self :Optional[Any] ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__( self :Dict , lowerCAmelCase__ :Optional[Any] ) -> Dict:
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : List[Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = bigram
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Tuple = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Any = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Tuple = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = ''' '''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = word
return word
def __magic_name__( self :Any , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(''' ''' ) )
return bpe_tokens
def __magic_name__( self :str , lowerCAmelCase__ :List[str] ) -> int:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__( self :Any , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + '''\n''' )
__SCREAMING_SNAKE_CASE : Dict = 0
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__SCREAMING_SNAKE_CASE : Any = token_index
writer.write(''' '''.join(lowerCAmelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__( self :Any , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False , **lowerCAmelCase__ :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : List[Any] = ''' ''' + text
return (text, kwargs)
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> Tuple:
return token_ids_a + [self.eos_token_id]
def __magic_name__( self :Any , lowerCAmelCase__ :"Conversation" ) -> List[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = ''' '''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
__SCREAMING_SNAKE_CASE : Optional[int] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
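# --- Hedged standalone sketch (mirrors get_pairs/bpe above) ---
# One BPE merge step: pick a pair and fuse every adjacent occurrence of it.
def merge_once(word, pair):
    first, second = pair
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            out.append(first + second)  # fuse the pair into one symbol
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)


_word = ("h", "e", "l", "l", "o")
assert ("l", "l") in get_pairs(_word)
assert merge_once(_word, ("l", "l")) == ("h", "e", "ll", "o")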
| 9 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned: the first dimension is the
        # number of nodes in the previous layer and the second is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self):
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value) -> numpy.ndarray:
    # takes an activation value s(x) and returns s'(x) = s(x) * (1 - s(x))
    return (value) * (1 - (value))


def example() -> int:
    # Input values (truth table of three boolean inputs).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
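# --- Hedged numerical check (standalone, not from the original file) ---
# sigmoid_derivative above takes the *activation* value, i.e. s'(x) = s(x) * (1 - s(x)).
# Verify that identity against a central finite difference.
_x = 0.7
_eps = 1e-6
_numeric = (sigmoid(numpy.array(_x + _eps)) - sigmoid(numpy.array(_x - _eps))) / (2 * _eps)
_analytic = sigmoid_derivative(sigmoid(numpy.array(_x)))
assert abs(float(_numeric) - float(_analytic)) < 1e-8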
| 318 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Optional[int]) ->Dict:
'''simple docstring'''
super().__init__()
lowerCamelCase__: int =torchvision.models.resnet152(pretrained=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =list(model.children())[:-2]
lowerCamelCase__: Optional[int] =nn.Sequential(*UpperCAmelCase_)
lowerCamelCase__: Dict =nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.pool(self.model(UpperCAmelCase_))
lowerCamelCase__: str =torch.flatten(UpperCAmelCase_ , start_dim=2)
lowerCamelCase__: int =out.transpose(1 , 2).contiguous()
return out # BxNx2048
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =[json.loads(UpperCAmelCase_) for l in open(UpperCAmelCase_)]
lowerCamelCase__: Tuple =os.path.dirname(UpperCAmelCase_)
lowerCamelCase__: Any =tokenizer
lowerCamelCase__: List[str] =labels
lowerCamelCase__: int =len(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =max_seq_length
lowerCamelCase__: str =transforms
def __len__(self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
return len(self.data)
def __getitem__(self : Tuple , UpperCAmelCase_ : Dict) ->int:
'''simple docstring'''
lowerCamelCase__: Optional[int] =torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=UpperCAmelCase_))
start_token , sentence , end_token =sentence[0], sentence[1:-1], sentence[-1]
sentence =sentence[: self.max_seq_length]
lowerCamelCase__: str =torch.zeros(self.n_classes)
lowerCamelCase__: str =1
lowerCamelCase__: List[Any] =Image.open(os.path.join(self.data_dir , self.data[index]["img"])).convert("RGB")
lowerCamelCase__: int =self.transforms(UpperCAmelCase_)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =Counter()
for row in self.data:
label_freqs.update(row["label"])
return label_freqs
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Tuple =[len(row["sentence"] ) for row in batch]
lowerCamelCase__ , lowerCamelCase__: Optional[int] =len(__a ), max(__a )
lowerCamelCase__: List[str] =torch.zeros(__a , __a , dtype=torch.long )
lowerCamelCase__: Optional[int] =torch.zeros(__a , __a , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__a , __a ) ):
lowerCamelCase__: Optional[int] =input_row["sentence"]
lowerCamelCase__: Dict =1
lowerCamelCase__: List[str] =torch.stack([row["image"] for row in batch] )
lowerCamelCase__: Optional[int] =torch.stack([row["label"] for row in batch] )
lowerCamelCase__: List[str] =torch.stack([row["image_start_token"] for row in batch] )
lowerCamelCase__: List[Any] =torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
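# --- Hedged standalone sketch (no torch) of the dynamic padding in collate_fn ---
# Pad every sequence in a batch to the batch maximum and build a 0/1 mask.
def pad_batch(sequences, pad_id=0):
    max_len = max(len(s) for s in sequences)
    padded = [s + [pad_id] * (max_len - len(s)) for s in sequences]
    mask = [[1] * len(s) + [0] * (max_len - len(s)) for s in sequences]
    return padded, mask


_padded, _mask = pad_batch([[5, 6, 7], [8]])
assert _padded == [[5, 6, 7], [8, 0, 0]]
assert _mask == [[1, 1, 1], [1, 0, 0]]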
| 10 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
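# --- Hedged standalone sketch of the id offsetting the asserts above encode ---
# Assumption: Pegasus-style layout where the first `offset` ids are reserved for
# special tokens and raw SentencePiece ids are shifted up by that amount; the
# pass-through for small ids below is an illustrative simplification.
_OFFSET = 103  # matches `tokenizer.offset == 1_0_3` in the test


def shift_id(sp_id, offset=_OFFSET):
    return sp_id + offset


def unshift_id(tok_id, offset=_OFFSET):
    return tok_id - offset if tok_id >= offset else tok_id


assert shift_id(2) == 105  # consistent with unk_token_id == offset + 2 above
assert unshift_id(105) == 2 and unshift_id(1) == 1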
| 318 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "gpt_neo"
__SCREAMING_SNAKE_CASE = ["past_key_values"]
__SCREAMING_SNAKE_CASE = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , __lowerCamelCase=5_0_2_5_7 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=2_4 , __lowerCamelCase=[[["global", "local"], 1_2]] , __lowerCamelCase=1_6 , __lowerCamelCase=None , __lowerCamelCase=2_5_6 , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0_2 , __lowerCamelCase=True , __lowerCamelCase=5_0_2_5_6 , __lowerCamelCase=5_0_2_5_6 , **__lowerCamelCase , ) -> Union[str, Any]:
_A : str = vocab_size
_A : List[Any] = max_position_embeddings
_A : Union[str, Any] = hidden_size
_A : Dict = num_layers
_A : str = num_heads
_A : Optional[Any] = intermediate_size
_A : str = window_size
_A : Dict = activation_function
_A : List[str] = resid_dropout
_A : Union[str, Any] = embed_dropout
_A : Dict = attention_dropout
_A : int = classifier_dropout
_A : List[Any] = layer_norm_epsilon
_A : List[str] = initializer_range
_A : Any = use_cache
_A : Any = bos_token_id
_A : str = eos_token_id
_A : Optional[Any] = attention_types
_A : Dict = self.expand_attention_types_params(__lowerCamelCase)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
F"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument.")
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase)
@staticmethod
def _lowerCamelCase ( __lowerCamelCase) -> Any:
_A : List[Any] = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
import torch
_A : str = input.size()
_A : Any = len(UpperCamelCase__ )
_A : List[str] = shape[dimension]
_A : Optional[Any] = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_A : str = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode="floor" ) + 1
_A : Dict = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_A : str = [slice(UpperCamelCase__ )] * rank
_A : int = indices
_A : List[Any] = input[s]
_A : List[Any] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
import torch
_A : Dict = torch.arange(1 , UpperCamelCase__ )
_A : List[str] = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_A : List[Any] = remainders == 0
_A : str = candidates[divisor_indices]
_A : Optional[int] = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode="floor" )
class lowerCAmelCase__ ( a):
'''simple docstring'''
@property
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
_A : Union[str, Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs")
_A : str = {0: "batch", 1: "past_sequence + sequence"}
else:
_A : Union[str, Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _lowerCamelCase ( self) -> int:
return self._config.num_heads
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]:
_A : Any = super(__lowerCamelCase , self).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase)
# We need to order the input in the way they appears in the forward()
_A : str = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_A , _A : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_A : Dict = seqlen + 2
_A : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_A : Dict = [
(torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)) for _ in range(self.num_layers)
]
_A : Optional[Any] = common_inputs["attention_mask"]
if self.use_past:
_A : Optional[Any] = ordered_inputs["attention_mask"].dtype
_A : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase)] , dim=1)
return ordered_inputs
@property
def _lowerCamelCase ( self) -> int:
return 1_3
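# --- Hedged plain-Python sketch of the two torch helpers above ---
# A 1-D sliding-window "unfold" and a largest-proper-divisor search (the latter
# picks a sample length that aligns with the local attention window).
def unfold_1d(xs, size, step):
    return [xs[i : i + size] for i in range(0, len(xs) - size + 1, step)]


def largest_proper_divisor(n):
    return max(d for d in range(1, n) if n % d == 0)


assert unfold_1d([0, 1, 2, 3, 4, 5], size=3, step=2) == [[0, 1, 2], [2, 3, 4]]
assert largest_proper_divisor(12) == 6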
| 11 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker: repeatedly swap values with neighbors, then report the result."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort a list with one process per element, swapping through pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
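# --- Hedged single-process reference (assumption: same odd-even scheme, no pipes) ---
# Useful for sanity-checking the parallel version above on small inputs.
def odd_even_sort(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_sort([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) == list(range(1, 11))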
| 318 | 0 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
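# --- Hedged first-order-only mini version of the dual-number idea above ---
# Carry (value, derivative) pairs through arithmetic; enough for one derivative.
class Dual1:
    def __init__(self, real, eps=0.0):
        self.real, self.eps = real, eps

    def __mul__(self, other):
        # product rule: (a + a'E)(b + b'E) = ab + (ab' + a'b)E with E**2 = 0
        return Dual1(self.real * other.real, self.real * other.eps + self.eps * other.real)

    def __pow__(self, n):
        out = Dual1(1.0)
        for _ in range(n):
            out = out * self
        return out


_y = Dual1(9.0, 1.0)  # seed derivative dy/dy = 1
assert (_y**6).eps == 6 * 9.0**5  # first derivative of y**6 at y = 9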
| 12 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
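# Example invocation (paths are placeholders; the script name is assumed from
# the usual transformers layout, it is not stated in this file):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod-base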
| 318 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
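# A minimal sanity check, assuming SI units (henries, hertz, ohms): with
# L = 35 mH and f = 1 kHz, X_L = 2*pi*f*L = 70*pi ~ 219.91 ohms, and zeroing
# exactly one argument makes the function solve for it from the other two.
assert round(ind_reactance(35e-3, 1e3, 0)["reactance"], 2) == 219.91
assert round(ind_reactance(0, 1e3, 70 * pi)["inductance"], 3) == 0.035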
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
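
# A minimal usage sketch (nothing assumed beyond the class above): the defaults
# mirror the original 48-layer CTRL model, and `attribute_map` lets generic
# attribute names resolve to the CTRL-specific ones.
#   config = CTRLConfig(n_layer=2, n_head=4)    # a tiny config, e.g. for tests
#   assert config.hidden_size == config.n_embd  # resolved via attribute_map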
| 318 | 0 |
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (memoized, top-down) knapsack: only the needed
    subproblems are solved, using the global table `f` pre-filled with -1.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
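
# Worked micro-example of the memoized recurrence above (hand-checked): with
# wt = [1, 2], val = [10, 15] and remaining capacity j = 2,
# f[2][2] = max(f[1][2], f[1][0] + 15) = max(10, 15) = 15.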
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer-weights knapsack problem and returns one of
    the several possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # For item i at remaining capacity j to be part of an optimal subset, the
    # optimal value at (i, j) must exceed the optimal value at (i - 1, j).
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 14 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""
    Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
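
# A minimal decoding sketch (variable names are illustrative): given generated
# `audio_values` of shape (batch, channels, seq_len) and the `padding_mask`
# returned by the feature extractor, `batch_decode` strips the padded frames:
#   audios = processor.batch_decode(audio_values, padding_mask=padding_mask)
#   audios[0].shape  # (channels, unpadded_len) for the first sample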
| 15 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
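
# A minimal usage sketch (the checkpoint name is illustrative): the mapping
# above routes the lookup, so a wav2vec2 checkpoint resolves to a
# Wav2Vec2FeatureExtractor instance.
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")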
| 318 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical(self):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
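
# The tiny UNet/VAE/CLIP components built above keep these fast tests on
# CPU-sized tensors; real checkpoints are exercised only in the @slow class
# at the bottom of this file.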
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        # `control_guidance_start`/`control_guidance_end` (scalars or one value per
        # controlnet) window the denoising steps during which each ControlNet is
        # active, so the four configurations below must all produce different images.
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical(self):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 16 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
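
# Shape sketch for the collator above: a batch of B examples with 4 endings
# each is flattened to B*4 sequences for `tokenizer.pad`, then viewed back to
# (B, 4, max_len) per key, with the labels collected into a (B,) int64 tensor.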
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A node of a singly linked list, holding an int and the next link."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
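
# Note on cost: construction sorts eagerly, so building a list is O(n log n)
# and the merge above re-sorts the concatenation in O((m + n) log (m + n));
# a pointer walk over the two already-sorted lists would achieve O(m + n).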
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 17 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
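
# Meeting in the middle expands roughly O(b^(d/2)) nodes per frontier instead
# of O(b^d) for a single frontier (b = branching factor, d = path length),
# which is why the bidirectional timing printed below is usually smaller on
# larger grids.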
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
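    # Hedged usage sketch: reload what the consolidate(...) call above just
    # wrote. RagTokenizer resolves the generator_tokenizer/ and
    # question_encoder_tokenizer/ subfolders saved by the script; treat this
    # as an illustrative round-trip check, not part of the original CLI.
    from transformers import RagTokenizer

    _reloaded_cls = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
    _reloaded_model = _reloaded_cls.from_pretrained(dest_dir)
    _reloaded_tokenizer = RagTokenizer.from_pretrained(dest_dir)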
| 18 |
'''simple docstring'''
import numpy as np
def sigmoid ( _lowercase ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-_lowercase ))
def sigmoid_linear_unit ( _lowercase ) -> np.ndarray:
    '''simple docstring'''
    return _lowercase * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
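    # Quick numeric check of the two activations above (the sample vector is
    # illustrative): sigmoid(0) is exactly 0.5, so x * sigmoid(x) is 0 at 0.
    demo = np.array([-1.0, 0.0, 1.0])
    demo_sig = 1 / (1 + np.exp(-demo))
    print(demo_sig)         # ~[0.2689 0.5    0.7311]
    print(demo * demo_sig)  # SiLU: ~[-0.2689 0.     0.7311]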
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A ={'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''DeiTFeatureExtractor''']
__A =['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
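# A minimal sketch of the deferred-import idea behind _LazyModule, using only
# the standard library: nothing is imported until the attribute is first
# touched. The attribute-to-module map below is an illustrative assumption.
import importlib


class _LazyNamespaceSketch:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module
        self._cache = {}

    def __getattr__(self, name):
        if name not in self._attr_to_module:
            raise AttributeError(name)
        if name not in self._cache:
            module = importlib.import_module(self._attr_to_module[name])
            self._cache[name] = getattr(module, name)
        return self._cache[name]


if __name__ == "__main__":
    ns = _LazyNamespaceSketch({"sqrt": "math"})
    print(ns.sqrt(9.0))  # 3.0 -- "math" is imported only on this first access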
| 19 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
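    # The qkv handling in convert_state_dict above slices one fused projection
    # matrix into query/key/value thirds along dim 0. A tiny self-contained
    # check of that slicing (dim=4 is an illustrative assumption):
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)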
| 318 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Dict = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : List[str] = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
lowercase : Optional[Any] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = []
lowercase : List[Any] = []
lowercase : str = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase : List[str] = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase : List[Any] = name[1:]
# figure out how many levels deep the name is
lowercase : int = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(SCREAMING_SNAKE_CASE__ )
# read data
lowercase : List[Any] = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
names.append("""/""".join(SCREAMING_SNAKE_CASE__ ) )
arrays.append(SCREAMING_SNAKE_CASE__ )
logger.info(f"Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers" )
# Sanity check
if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})" )
lowercase : Union[str, Any] = list(set(SCREAMING_SNAKE_CASE__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = full_name.split("""/""" )
lowercase : int = model
lowercase : List[Any] = []
for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
lowercase : Tuple = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , """embeddings""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """encoder""" )
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , """layer""" )
lowercase : Dict = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """pooler""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE__ , """token_type_embeddings""" )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append("""weight""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """intermediate""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """weight""" )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
lowercase : List[Any] = """.""".join(SCREAMING_SNAKE_CASE__ )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , SCREAMING_SNAKE_CASE__ ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase : str = array.transpose()
if pointer.shape == array.shape:
lowercase : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
# Instantiate model
logger.info(f"Loading model based on config from {config_path}..." )
lowercase : Any = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = BertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowercase : str = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
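    # The converter above maps slash-delimited TF variable names onto nested
    # PyTorch modules one getattr at a time. A tiny self-contained sketch of
    # that traversal (the toy module names are illustrative assumptions):
    def _resolve_sketch(root, dotted_path):
        pointer = root
        for part in dotted_path.split("."):
            pointer = getattr(pointer, part)
        return pointer

    _toy = torch.nn.Sequential()
    _toy.add_module("dense", torch.nn.Linear(2, 2))
    print(_resolve_sketch(_toy, "dense.weight").shape)  # torch.Size([2, 2])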
| 20 |
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
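    # A self-contained O(n^2) dynamic-programming sketch of the same problem
    # (longest non-decreasing subsequence); the sample input is illustrative.
    def lnds_sketch(array: list[int]) -> list[int]:
        if not array:
            return []
        best = [[value] for value in array]  # best[i]: best subsequence ending at i
        for i in range(len(array)):
            for j in range(i):
                if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                    best[i] = best[j] + [array[i]]
        return max(best, key=len)

    print(lnds_sketch([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # [10, 22, 33, 50, 60, 80]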
| 318 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
SCREAMING_SNAKE_CASE : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: "
SCREAMING_SNAKE_CASE : Optional[int] = "=======\n>>>>>>>\n"
SCREAMING_SNAKE_CASE : str = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
SCREAMING_SNAKE_CASE : List[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _lowerCamelCase( _a ):
@staticmethod
def UpperCamelCase ( lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = parser.add_parser(
'convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.', )
train_parser.add_argument(
'--tfds_path', type=lowerCamelCase, required=lowerCamelCase, help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.', )
train_parser.add_argument(
'--datasets_directory', type=lowerCamelCase, required=lowerCamelCase, help='Path to the HuggingFace Datasets folder.')
train_parser.set_defaults(func=lowerCamelCase)
def __init__( self, lowerCamelCase, lowerCamelCase, *lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = get_logger('datasets-cli/converting')
_lowercase : Union[str, Any] = tfds_path
_lowercase : Tuple = datasets_directory
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
if os.path.isdir(self._tfds_path):
_lowercase : List[Any] = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
_lowercase : Tuple = os.path.dirname(self._tfds_path)
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')
_lowercase : List[str] = os.path.abspath(self._datasets_directory)
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
_lowercase : Any = []
_lowercase : Union[str, Any] = []
_lowercase : Optional[Any] = {}
if os.path.isdir(self._tfds_path):
_lowercase : int = os.listdir(lowerCamelCase)
else:
_lowercase : List[Any] = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''')
_lowercase : Any = os.path.join(lowerCamelCase, lowerCamelCase)
_lowercase : int = os.path.join(lowerCamelCase, lowerCamelCase)
if not os.path.isfile(lowerCamelCase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file')
continue
with open(lowerCamelCase, encoding='utf-8') as f:
_lowercase : List[Any] = f.readlines()
_lowercase : Any = []
_lowercase : Tuple = False
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = []
for line in lines:
_lowercase : Dict = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_lowercase : Tuple = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
_lowercase : List[Any] = ''
continue
elif "from absl import logging" in out_line:
_lowercase : List[str] = 'from datasets import logging\n'
elif "getLogger" in out_line:
_lowercase : Tuple = out_line.replace('getLogger', 'get_logger')
elif any(expression in out_line for expression in TO_HIGHLIGHT):
_lowercase : Tuple = True
_lowercase : List[str] = list(filter(lambda lowerCamelCase: e in out_line, lowerCamelCase))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase) + '\n')
out_lines.append(lowerCamelCase)
out_lines.append(lowerCamelCase)
continue
else:
for pattern, replacement in TO_CONVERT:
_lowercase : Union[str, Any] = re.sub(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_lowercase : List[str] = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)', lowerCamelCase)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
_lowercase : Optional[Any] = 'from . import ' + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''')
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_lowercase : Optional[Any] = True
out_lines.append(lowerCamelCase)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowercase : Optional[Any] = f_name.replace('.py', '')
_lowercase : Tuple = os.path.join(lowerCamelCase, lowerCamelCase)
_lowercase : str = os.path.join(lowerCamelCase, lowerCamelCase)
os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase)
self._logger.info(F'''Adding directory {output_dir}''')
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase)
if needs_manual_update:
with_manual_update.append(lowerCamelCase)
with open(lowerCamelCase, 'w', encoding='utf-8') as f:
f.writelines(lowerCamelCase)
self._logger.info(F'''Converted in {output_file}''')
for utils_file in utils_files:
try:
_lowercase : int = os.path.basename(lowerCamelCase)
_lowercase : int = imports_to_builder_map[f_name.replace('.py', '')]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''')
shutil.copy(lowerCamelCase, lowerCamelCase)
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
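if __name__ == "__main__":
    # A minimal demo of the ordered re.sub rewriting the command above applies
    # line by line; the two patterns and the input line are a small
    # illustrative subset of the full TO_CONVERT table.
    _demo_convert = [
        (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
        (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    ]
    _demo_line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
    for _pattern, _replacement in _demo_convert:
        _demo_line = re.sub(_pattern, _replacement, _demo_line)
    print(_demo_line)  # features=datasets.Features({'text': datasets.Value('string')})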
| 21 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
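# The class above is a thin deprecation shim: warn once at construction, then
# behave exactly like the replacement. A self-contained sketch of the pattern
# (both class names below are illustrative assumptions):
class _NewProcessorSketch:
    def __call__(self, inputs):
        return inputs


class _OldProcessorSketch(_NewProcessorSketch):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldProcessorSketch is deprecated; use _NewProcessorSketch instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    _OldProcessorSketch()  # emits a FutureWarning, then acts like the new class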
| 318 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE :List[Any] = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = AlbertTokenizer
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_UpperCAmelCase = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
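if __name__ == "__main__":
    # The two helpers above lay out "[CLS] A [SEP] B [SEP]" and its 0/1 segment
    # ids. A self-contained sketch with dummy ids (0 for [CLS] and 1 for [SEP]
    # are illustrative assumptions):
    _cls, _sep = [0], [1]
    _tokens_a, _tokens_b = [11, 12], [21, 22, 23]
    print(_cls + _tokens_a + _sep + _tokens_b + _sep)  # [0, 11, 12, 1, 21, 22, 23, 1]
    print(len(_cls + _tokens_a + _sep) * [0] + len(_tokens_b + _sep) * [1])  # [0, 0, 0, 0, 1, 1, 1, 1]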
| 22 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Dict = logging.get_logger(__name__)
UpperCamelCase__: Optional[Any] = ["model.decoder.embed_positions.weights"]
def snake_case_ ( _lowerCAmelCase : str ) -> Tuple:
if "emb" in name:
UpperCAmelCase : int = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
UpperCAmelCase : Optional[Any] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
UpperCAmelCase : List[str] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
UpperCAmelCase : Any = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
UpperCAmelCase : str = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
UpperCAmelCase : str = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
UpperCAmelCase : Optional[int] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def snake_case_ ( _lowerCAmelCase : OrderedDict , _lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
UpperCAmelCase : int = list(state_dict.keys() )
UpperCAmelCase : Tuple = {}
for key in keys:
UpperCAmelCase : Optional[int] = state_dict.pop(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = rename_keys(_lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Union[str, Any] = val[:hidden_size, :]
UpperCAmelCase : str = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : Tuple = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def snake_case_ ( _lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCAmelCase : Optional[int] = 1024
UpperCAmelCase : List[Any] = 24
UpperCAmelCase : List[Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : Tuple = 1536
UpperCAmelCase : Union[str, Any] = 48
UpperCAmelCase : Tuple = 24
elif checkpoint == "large":
UpperCAmelCase : Tuple = 2048
UpperCAmelCase : List[Any] = 48
UpperCAmelCase : List[Any] = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
UpperCAmelCase : List[Any] = MusicgenDecoderConfig(
hidden_size=_lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , )
return config
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Dict="cpu" ) -> str:
UpperCAmelCase : List[str] = MusicGen.get_pretrained(_lowerCAmelCase , device=_lowerCAmelCase )
UpperCAmelCase : Dict = decoder_config_from_checkpoint(_lowerCAmelCase )
UpperCAmelCase : Any = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_state_dict(
_lowerCAmelCase , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained('''t5-base''' )
UpperCAmelCase : Dict = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
UpperCAmelCase : Optional[int] = MusicgenForCausalLM(_lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
UpperCAmelCase : Any = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase , audio_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : List[str] = model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''t5-base''' )
UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
UpperCAmelCase : int = MusicgenProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# set the appropriate bos/pad token ids
UpperCAmelCase : Optional[int] = 2048
UpperCAmelCase : List[Any] = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = 3.0
if pytorch_dump_folder is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_lowerCAmelCase )
processor.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
UpperCamelCase__: Optional[int] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
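    # rename_keys above canonicalises fairseq names through ordered
    # str.replace passes. A tiny self-contained demo of that idea (the mapping
    # below is a small illustrative subset of the real one):
    _demo_renames = [("emb", "model.decoder.embed_tokens"), ("linears", "lm_heads")]

    def _rename_sketch(name):
        for old, new in _demo_renames:
            name = name.replace(old, new)
        return name

    print(_rename_sketch("emb.weight"))      # model.decoder.embed_tokens.weight
    print(_rename_sketch("linears.0.bias"))  # lm_heads.0.bias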
| 23 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
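    # The adapter above adds a low-rank residual path, y = module(x) + up(down(x)),
    # with the up-projection zero-initialised so training starts from a no-op.
    # A tiny self-contained check of that property (the sizes are illustrative):
    if __name__ == "__main__":
        _base = nn.Linear(8, 8)
        _down = nn.Linear(8, 2, bias=False)  # rank-2 down-projection
        _up = nn.Linear(2, 8, bias=False)
        nn.init.zeros_(_up.weight)  # zero init => adapter output is exactly 0
        _x = torch.randn(3, 8)
        assert torch.allclose(_base(_x) + _up(_down(_x)), _base(_x))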
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
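# Editor's aside: the quantization integration deliberately skips output heads (the causal
# `lm_head`, the classification `score`, tied embeddings), so those weights remain plain
# torch.nn.Parameter while inner blocks hold bitsandbytes parameter classes -- the same
# mechanism users can steer via `llm_int8_skip_modules` in BitsAndBytesConfig.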
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
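# Editor's sketch of the LoRALayer helper used above. Its real definition lives elsewhere in
# the test suite; treat this as an illustrative reconstruction, not the original code:
#
#   class LoRALayer(nn.Module):
#       def __init__(self, module: nn.Module, rank: int):
#           super().__init__()
#           self.module = module  # the frozen pretrained projection
#           self.adapter = nn.Sequential(
#               nn.Linear(module.in_features, rank, bias=False),
#               nn.Linear(rank, module.out_features, bias=False),
#           )
#           nn.init.normal_(self.adapter[0].weight, std=1 / rank**0.5)
#           nn.init.zeros_(self.adapter[1].weight)  # start as a no-op residual branch
#           self.adapter.to(module.weight.device)
#
#       def forward(self, x):
#           return self.module(x) + self.adapter(x)
#
# Zero-initialising adapter[1] still yields non-zero gradients on it, so the grad-norm
# assertions above hold after one backward pass while the base weights stay frozen.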
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCamelCase__ ( snake_case_ : dict ) -> tuple:
return (data["data"], data["target"])
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> np.ndarray:
__snake_case = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(snake_case_ , snake_case_ )
# Predict target for test data
__snake_case = xgb.predict(snake_case_ )
__snake_case = predictions.reshape(len(snake_case_ ) , 1 )
return predictions
def lowerCamelCase__ ( ) -> None:
__snake_case = fetch_california_housing()
__snake_case , __snake_case = data_handling(snake_case_ )
__snake_case , __snake_case , __snake_case , __snake_case = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 , random_state=1 )
__snake_case = xgboost(snake_case_ , snake_case_ , snake_case_ )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(snake_case_ , snake_case_ )}""" )
print(f"""Mean Square Error : {mean_squared_error(snake_case_ , snake_case_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
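# Editor's aside: mean_squared_error above reports MSE; if an RMSE is wanted instead,
# scikit-learn can return it directly via
#   mean_squared_error(y_test, predictions, squared=False)
# (the `squared=False` switch assumes scikit-learn >= 0.22).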
| 24 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
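# Editor's aside: the two branches above produce the standard single- and pair-sequence
# layouts, [CLS] A [SEP] and [CLS] A [SEP] B [SEP]; the token-type method further below
# then marks the first segment (and its delimiters) with 0s and the second with 1s.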
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowercase_ ( _snake_case = 3 ):
    if isinstance(_snake_case ,str ):
        raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""" )
SCREAMING_SNAKE_CASE__ : str = QuantumRegister(_snake_case ,"""qr""" )
SCREAMING_SNAKE_CASE__ : Dict = ClassicalRegister(_snake_case ,"""cr""" )
SCREAMING_SNAKE_CASE__ : Dict = QuantumCircuit(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) ,j ,counter )
for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case ,_snake_case )
# simulate with 10000 shots
SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend("""qasm_simulator""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = execute(_snake_case ,_snake_case ,shots=10_000 )
return job.result().get_counts(_snake_case )
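# Editor's aside: the loops above are the textbook QFT decomposition -- a Hadamard on each
# qubit, controlled-phase rotations of angle pi / 2**k between qubit pairs, then swaps to
# reverse the qubit order. Starting from |0...0>, the QFT yields the uniform superposition,
# so the 10_000-shot counts should be roughly even across all 2**n basis states
# (about 1_250 per outcome for n = 3).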
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 25 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
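# Editor's aside (illustrative): the processor simply fans text out to the tokenizer and
# images out to the image processor, so a combined call such as
#   processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
# returns input_ids / token_type_ids / attention_mask plus pixel_values -- the exact key
# list the assertions above verify.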
| 318 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 1
@register_to_config
def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
def a__ ( self , _a , _a = None ) -> Union[str, Any]:
_A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_A : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_A : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
_A : List[Any] = std.unsqueeze(-1 )
_A : int = -score / std
# compute
_A : Tuple = -1.0 / len(self.timesteps )
_A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_A : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_A : Union[str, Any] = beta_t.unsqueeze(-1 )
_A : Tuple = -0.5 * beta_t * x
_A : Tuple = torch.sqrt(_a )
_A : Dict = drift - diffusion**2 * score
_A : Dict = x + drift * dt
# add noise
_A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
_A : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
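# Editor's gloss (following Song et al.'s score-based SDE formulation): step_pred performs
# one Euler-Maruyama step of the reverse-time VP-SDE,
#   x_{t+dt} = x_t + [f(x, t) - g(t)^2 * score] * dt + g(t) * sqrt(-dt) * z,   z ~ N(0, I),
# with drift f(x, t) = -0.5 * beta(t) * x and diffusion g(t) = sqrt(beta(t)); the score is
# recovered from the model output by dividing by the marginal std derived from
# log_mean_coeff, and dt = -1 / num_steps so sqrt(-dt) is real.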
| 26 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
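# Editor's aside: reading the body as the usual
# shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id), a hypothetical call
# with start id 0 and pad id 1 maps [[5, -100, 7]] to [[0, 5, 1]] -- labels shifted one
# slot right, the start token prepended, and the -100 ignore-index replaced by the pad id.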
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__lowercase : Tuple = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__lowercase : Dict = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__lowercase : Optional[Any] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
    # img2img is flaky across GPUs even in fp32, so compare the max absolute error over the full image here
assert np.abs(expected_image - image ).max() < 1E-2
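# Editor's aside: in img2img pipelines, strength=0.75 noises the init image to 75% of the
# schedule and denoises only that tail, so higher strength drifts further from the input;
# comparing against a stored full-image array (rather than a 3x3 slice) keeps the check
# robust to the GPU-to-GPU nondeterminism noted above.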
| 318 | 0 |
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 28 |
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
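# Editor's aside: this is Project Euler problem 43 -- for a 0-9 pandigital d1..d10, each
# 3-digit window d2d3d4 ... d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17 in turn.
# The first three checks handle 2 (last digit even), 3 (digit sum) and 5 (last digit)
# cheaply before the [7, 11, 13, 17] window loop.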
| 318 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=1_8 , _UpperCamelCase=3_0 , _UpperCamelCase=4_0_0 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = size if size is not None else {'height': 2_0, 'width': 2_0}
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : Union[str, Any] = max_resolution
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : Union[str, Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = do_convert_rgb
UpperCAmelCase_ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase_ : Optional[int] = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[str] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
UpperCAmelCase_ : int = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = PixaStructImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : List[str] = 2_0_4_8
UpperCAmelCase_ : List[str] = image_processor(_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Optional[Any] = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
UpperCAmelCase_ : Any = 'Hello'
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Any = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> Any:
# Initialize image_processor
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
UpperCAmelCase_ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : int = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ) -> int:
# Initialize image_processor
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : List[str] = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ : int = 3
@property
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
def __UpperCAmelCase ( self ) -> str:
# Initialize image_processor
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Any = image_processor(
_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
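# Editor's aside: the `+ 2` in expected_hidden_dim reflects Pix2Struct's patch layout --
# every flattened patch of patch_h * patch_w * channels pixel values is prefixed with its
# (row, column) coordinates, adding two scalars per patch before padding to max_patches.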
| 29 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
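# Editor's aside: the tokenization assertion above is plain WordPiece behaviour -- the
# basic tokenizer lower-cases and strips the accent from "UNwant\u00E9d,running", and the
# greedy longest-match-first pass then splits it into
# ["un", "##want", "##ed", ",", "runn", "##ing"], whose ids [7, 4, 5, 10, 8, 9] index the
# toy vocabulary written out in setUp.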
| 318 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase__( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Optional[int]:
super().__init__()
lowercase_ = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = False
lowercase_ = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
lowercase_ = TaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , d_model=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , d_kv=SCREAMING_SNAKE_CASE_ , d_ff=SCREAMING_SNAKE_CASE_ , dropout_rate=SCREAMING_SNAKE_CASE_ , feed_forward_proj=SCREAMING_SNAKE_CASE_ , is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE_ ):
lowercase_ = TaBlock(SCREAMING_SNAKE_CASE_ )
self.encoders.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = TaLayerNorm(SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
lowercase_ = self.token_embedder(SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(SCREAMING_SNAKE_CASE_ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.dropout_pre(SCREAMING_SNAKE_CASE_ )
# inverted the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for lyr in self.encoders:
lowercase_ = lyr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.layer_norm(SCREAMING_SNAKE_CASE_ )
return self.dropout_post(SCREAMING_SNAKE_CASE_ ), encoder_inputs_mask
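# Editor's gloss: this is a T5-style note encoder -- learned token embeddings plus a
# non-trainable absolute position table, a stack of TaBlock layers sharing one TaConfig,
# and a final T5 RMS-style layer norm. get_extended_attention_mask (from ModuleUtilsMixin)
# broadcasts the (batch, seq) padding mask into the additive (batch, 1, 1, seq) form the
# attention blocks expect.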
| 30 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
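# Editor's aside: GenerationConfig.from_pretrained forwards extra keyword arguments as
# overrides, which is why loading with temperature=1.0 above wins over the 0.7 that was
# saved -- a way to spot-override a checkpoint's generation defaults without editing JSON.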
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__SCREAMING_SNAKE_CASE : Dict = get_logger(__name__)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Dict , A : Dict , A : Union[str, Any]=None ):
_UpperCAmelCase : str = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self , A , getattr(A , A ) )
_UpperCAmelCase : str = module._original_module if isinstance(A , _PatchedModuleObj ) else module
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Tuple = []
def __init__( self : int , A : int , A : str , A : Union[str, Any] , A : Dict=None ):
_UpperCAmelCase : Tuple = obj
_UpperCAmelCase : List[Any] = target
_UpperCAmelCase : Any = new
_UpperCAmelCase : str = target.split("." )[0]
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : str = attrs or []
def __enter__( self : List[str] ):
*_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(A ) ):
try:
_UpperCAmelCase : str = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_UpperCAmelCase : Optional[Any] = getattr(self.obj , A )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(A , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_UpperCAmelCase : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj , A , _PatchedModuleObj(A , attrs=self.attrs ) )
_UpperCAmelCase : int = getattr(self.obj , A )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(A , A , _PatchedModuleObj(getattr(A , A , A ) , attrs=self.attrs ) )
_UpperCAmelCase : str = getattr(A , A )
# finally set the target attribute
setattr(A , A , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_UpperCAmelCase : int = getattr(import_module(".".join(A ) ) , A )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , A ) is attr_value:
_UpperCAmelCase : List[Any] = getattr(self.obj , A )
setattr(self.obj , A , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
_UpperCAmelCase : Union[str, Any] = globals()["__builtins__"][target_attr]
setattr(self.obj , A , self.new )
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self : Union[str, Any] , *A : str ):
for attr in list(self.original ):
setattr(self.obj , A , self.original.pop(A ) )
def _A ( self : Dict ):
self.__enter__()
self._active_patches.append(self )
def _A ( self : List[str] ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
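# Editor's sketch of how the patcher above (upstream: datasets' patch_submodule) is driven;
# the target module and replacement below are hypothetical:
#
#   patcher = patch_submodule(some_module, "os.path.join", lambda *p: "/".join(p))
#   with patcher:                     # __enter__ walks "os" -> "os.path" and swaps join
#       some_module.os.path.join("a", "b")   # -> "a/b"
#   # __exit__ restores the saved attributes; start()/stop() (the last two methods) do the
#   # same while maintaining the class-level _active_patches registry.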
| 31 |
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] is the number of nodes in the input layer.
# The first hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
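# An added note on the three updates above: with d = 2 * (y - y_hat) * sigmoid'(y_hat),
# they are the chain-rule gradients of the squared error, schematically
# (illustrative notation, not names from this file):
#   dW3 = h2.T @ d
#   dW2 = h1.T @ ((d @ W3.T) * sigmoid'(h2))
#   dW1 = x.T  @ ((((d @ W3.T) * sigmoid'(h2)) @ W2.T) * sigmoid'(h1))
# The `+=` descends the squared error because d already carries the
# (y - y_hat) sign.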
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
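# An added sanity check: the derivative helper above expects the already
# activated value sigma(x), since d/dx sigma(x) = sigma(x) * (1 - sigma(x));
# at x = 0, sigma(0) = 0.5 and the derivative is 0.25.
_sig_at_zero = 1 / (1 + numpy.exp(-0.0))
assert abs(_sig_at_zero * (1 - _sig_at_zero) - 0.25) < 1e-12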
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
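# Note (an addition): the truth table in the example is 3-bit parity (label 1
# for an odd number of ones), so the expected prediction for [1, 1, 1] is 1;
# with only 10 training iterations the network may not have converged yet.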
| 318 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : UNetaDModel
snake_case__ : KarrasVeScheduler
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : KarrasVeScheduler ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 5_0 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
a_ : Tuple = self.unet.config.sample_size
a_ : List[str] = (batch_size, 3, img_size, img_size)
a_ : int = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
a_ : str = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
a_ : str = self.scheduler.schedule[t]
a_ : List[str] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
a_ , a_ : Union[str, Any] = self.scheduler.add_noise_to_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
a_ : Optional[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
a_ : Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
a_ : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
a_ : Union[str, Any] = self.scheduler.step_correct(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , step_output.prev_sample , step_output['derivative'] , )
a_ : List[str] = step_output.prev_sample
a_ : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
a_ : int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a_ : Any = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
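# Illustrative usage sketch (an addition; the checkpoint id is an assumption
# taken from the diffusers Karras VE examples):
#
#   unet = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = SCREAMING_SNAKE_CASE__(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]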
| 32 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
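# An added summary of the id scheme exercised above: Pegasus reserves the
# first `offset` (103) ids for special and mask tokens (0 -> <pad>,
# 1 -> </s>, 2 -> <mask_1>, 3 -> <mask_2>, ...) and shifts the underlying
# sentencepiece ids up by `offset`; the large-tokenizer checks pin this down
# with unk_token_id == offset + 2 == 105 and vocab_size == 96103.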
| 318 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
def __init__( self : List[str] , A : Optional[Any] , A : Optional[int]=13 , A : List[str]=7 , A : str=True , A : List[Any]=True , A : List[str]=True , A : Dict=True , A : List[str]=99 , A : Any=24 , A : str=2 , A : Optional[Any]=6 , A : str=37 , A : List[Any]="gelu" , A : List[str]=0.1 , A : Union[str, Any]=0.1 , A : str=5_12 , A : List[Any]=16 , A : str=2 , A : Optional[int]=0.02 , A : List[Any]=3 , A : str=None , A : List[str]=10_00 , ) -> List[str]:
lowercase_ : Union[str, Any] = parent
lowercase_ : Optional[Any] = batch_size
lowercase_ : int = seq_length
lowercase_ : Union[str, Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[int] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : str = max_position_embeddings
lowercase_ : int = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[Any] = num_labels
lowercase_ : Dict = scope
lowercase_ : int = range_bbox
def A ( self : Optional[Any] ) -> Dict:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase_ : Optional[int] = bbox[i, j, 3]
lowercase_ : List[str] = bbox[i, j, 1]
lowercase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase_ : Optional[Any] = bbox[i, j, 2]
lowercase_ : Dict = bbox[i, j, 0]
lowercase_ : List[Any] = t
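# (An added note: the double loop above makes every random box well-formed by
#  swapping coordinates so that bbox[..., 0] <= bbox[..., 2] and
#  bbox[..., 1] <= bbox[..., 3].)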
lowercase_ : Optional[int] = None
if self.use_input_mask:
lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase_ : Dict = None
if self.use_token_type_ids:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Any = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Tuple = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : int ) -> Optional[int]:
return LiltConfig(
vocab_size=self.vocab_size ,
hidden_size=self.hidden_size ,
num_hidden_layers=self.num_hidden_layers ,
num_attention_heads=self.num_attention_heads ,
intermediate_size=self.intermediate_size ,
hidden_act=self.hidden_act ,
hidden_dropout_prob=self.hidden_dropout_prob ,
attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
max_position_embeddings=self.max_position_embeddings ,
type_vocab_size=self.type_vocab_size ,
initializer_range=self.initializer_range , )
def A ( self : str , A : str , A : int , A : Dict , A : Optional[Any] , A : Any , A : Optional[int] , A : int , ) -> Tuple:
lowercase_ : str = LiltModel(config=A )
model.to(A )
model.eval()
lowercase_ : Any = model(A , bbox=A , attention_mask=A , token_type_ids=A )
lowercase_ : Dict = model(A , bbox=A , token_type_ids=A )
lowercase_ : Any = model(A , bbox=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Optional[int] , A : List[Any] , A : Any , A : Dict , A : List[Any] , A : int , A : Tuple , A : Optional[Any] , ) -> int:
lowercase_ : int = self.num_labels
lowercase_ : Union[str, Any] = LiltForTokenClassification(config=A )
model.to(A )
model.eval()
lowercase_ : Any = model(
A , bbox=A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : str , A : Any , A : Any , A : Dict , A : Optional[Any] , A : List[Any] , A : Optional[int] , A : List[str] , ) -> Any:
lowercase_ : List[Any] = LiltForQuestionAnswering(config=A )
model.to(A )
model.eval()
lowercase_ : List[Any] = model(
A , bbox=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : List[Any] ) -> Tuple:
lowercase_ : str = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Tuple = config_and_inputs
lowercase_ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def A ( self : int , A : Optional[Any] , A : Union[str, Any] , A : List[str] , A : Union[str, Any] , A : int ) -> str:
return True
def A ( self : Tuple ) -> List[Any]:
lowercase_ : Dict = LiltModelTester(self )
lowercase_ : Tuple = ConfigTester(self , config_class=A , hidden_size=37 )
def A ( self : Any ) -> int:
self.config_tester.run_common_tests()
def A ( self : Dict ) -> Tuple:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A ( self : str ) -> Dict:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Dict = type
self.model_tester.create_and_check_model(*A )
def A ( self : Any ) -> List[str]:
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def A ( self : List[Any] ) -> Any:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def A ( self : int ) -> str:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = LiltModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : str ) -> List[str]:
lowercase_ : Any = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(A )
lowercase_ : int = torch.tensor([[1, 2]] , device=A )
lowercase_ : Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(input_ids=A , bbox=A )
lowercase_ : Optional[int] = torch.Size([1, 2, 7_68] )
lowercase_ : str = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=A , )
self.assertTrue(outputs.last_hidden_state.shape , A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A , atol=1e-3 ) )
| 33 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
# (note: the swap count is hardcoded to 10, matching the length of the
# example list built in main() below)
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
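# Illustrative single-process reference of odd-even transposition sort, for
# comparison with the multiprocessing version above (this function is an
# addition, not part of the original module):
def odd_even_transposition_reference(arr):
    result = list(arr)
    n = len(result)
    for phase in range(n):  # n phases guarantee a sorted list
        # even phases compare pairs (0, 1), (2, 3), ...; odd phases (1, 2), (3, 4), ...
        for j in range(phase % 2, n - 1, 2):
            if result[j] > result[j + 1]:
                result[j], result[j + 1] = result[j + 1], result[j]
    return result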
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 318 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A =get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class _a ( __a , unittest.TestCase ):
__a : List[Any] = BartphoTokenizer
__a : str = False
__a : str = True
def A ( self : Any ):
'''simple docstring'''
super().setUp()
UpperCAmelCase = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
UpperCAmelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
UpperCAmelCase = {'''unk_token''': '''<unk>'''}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
UpperCAmelCase = BartphoTokenizer(lowercase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Tuple , **lowercase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Dict , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = '''This is a là test'''
UpperCAmelCase = '''This is a<unk><unk> test'''
return input_text, output_text
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = BartphoTokenizer(lowercase , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCAmelCase = '''This is a là test'''
UpperCAmelCase = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
UpperCAmelCase = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
| 34 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) ,
checkpoint_file=Path(_lowercase ).name ,
_name='''xmod_base''' ,
arch='''xmod_base''' ,
task='''multilingual_masked_lm''' ,
data_name_or_path=str(_lowercase ) ,
bpe='''sentencepiece''' ,
sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) ,
src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,
hidden_size=xmod.cfg.model.encoder_embed_dim ,
num_hidden_layers=xmod.cfg.model.encoder_layers ,
num_attention_heads=xmod.cfg.model.encoder_attention_heads ,
intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,
max_position_embeddings=514 ,
type_vocab_size=1 ,
layer_norm_eps=1e-5 ,
pre_norm=xmod.cfg.model.encoder_normalize_before ,
adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) ,
adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,
adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,
ln_before_adapter=xmod.cfg.model.ln_before_adapter ,
languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
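# Illustrative invocation (an addition; the script filename and paths are
# hypothetical):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head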
| 318 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
if isinstance(_lowerCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
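# (An added note: the helper above is meant to normalize image_size/patch_size
#  arguments: an iterable is returned unchanged, a scalar is duplicated into
#  a pair.)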
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
pass
def lowerCamelCase ( self : Optional[Any] ):
pass
def lowerCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[Any]=None , **snake_case_ : List[Any] ):
snake_case__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel(snake_case_ )
snake_case__ : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any]=None , **snake_case_ : Union[str, Any] ):
snake_case__ , snake_case__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : str=None , **snake_case_ : Union[str, Any] ):
snake_case__ , snake_case__ : Dict = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self : Any , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : int=None , **snake_case_ : str ):
snake_case__ , snake_case__ : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Any = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
snake_case__ : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
snake_case__ : Tuple = after_output[0].numpy()
snake_case__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str]=None , **snake_case_ : List[str] ):
snake_case__ , snake_case__ : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : int = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
snake_case__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = to_atuple(vision_model.config.image_size )
snake_case__ : str = to_atuple(vision_model.config.patch_size )
snake_case__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Union[str, Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Any = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self : str , snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float ):
snake_case__ : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(snake_case_ , snake_case_ , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCamelCase ( self : Any ):
snake_case__ : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowerCamelCase ( self : str ):
snake_case__ : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowerCamelCase ( self : int ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@slow
def lowerCamelCase ( self : str ):
snake_case__ , snake_case__ : Any = self.get_pretrained_model_and_inputs()
snake_case__ : Union[str, Any] = model_a(**snake_case_ )
snake_case__ : Union[str, Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case_ )
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
snake_case__ : int = model_a(**snake_case_ )
snake_case__ : Dict = after_outputs[0].numpy()
snake_case__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
snake_case__ : Optional[int] = 13
snake_case__ : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Any = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] ):
snake_case__ : Union[str, Any] = TFViTModel(snake_case_ , name="""vision_model""" )
snake_case__ : Any = TFBertModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : str ):
snake_case__ : Union[str, Any] = TFViTModelTester(self )
snake_case__ : str = TFBertModelTester(self )
snake_case__ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
snake_case__ : Any = bert_model_tester.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = vision_config_and_inputs
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Dict ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
snake_case__ : Any = 13
snake_case__ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Optional[Any] = random_attention_mask([batch_size, 4] )
snake_case__ : Dict = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : List[str] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ):
snake_case__ , snake_case__ : Any = self.get_vision_text_model(snake_case_ , snake_case_ )
snake_case__ : Tuple = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
snake_case__ : Union[str, Any] = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
snake_case__ : str = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case__ : Tuple = to_atuple(vision_model.config.image_size )
snake_case__ : List[Any] = to_atuple(vision_model.config.patch_size )
snake_case__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
snake_case__ : Union[str, Any] = TFDeiTModel(snake_case_ , name="""vision_model""" )
snake_case__ : Tuple = TFRobertaModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : List[str] ):
snake_case__ : int = TFDeiTModelTester(self )
snake_case__ : Union[str, Any] = TFRobertaModelTester(self )
snake_case__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
snake_case__ : str = bert_model_tester.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = vision_config_and_inputs
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
snake_case__ : Tuple = 13
snake_case__ : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : str = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : int ):
snake_case__ : List[str] = TFCLIPVisionModel(snake_case_ , name="""vision_model""" )
snake_case__ : Optional[Any] = TFBertModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
def lowerCamelCase ( self : Dict ):
snake_case__ : int = TFCLIPVisionModelTester(self )
snake_case__ : Optional[int] = TFBertModelTester(self )
snake_case__ : str = clip_model_tester.prepare_config_and_inputs()
snake_case__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
snake_case__ , snake_case__ : List[Any] = vision_config_and_inputs
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
snake_case__ : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
snake_case__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case__ : List[str] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=snake_case_ , padding=snake_case_ , return_tensors="""np""" )
snake_case__ : int = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
snake_case__ : Optional[int] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1E-3 ) )
| 35 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
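# An added note: `attribute_map` above aliases the canonical config names to
# the CTRL-specific ones, so on an instance `hidden_size` reads `n_embd`,
# `num_hidden_layers` reads `n_layer`, and so on (illustrative):
#   cfg.hidden_size == cfg.n_embd
#   cfg.num_attention_heads == cfg.n_head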
| 318 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_snake_case = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
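
# Note on the pattern above: registering a `_LazyModule` in place of this
# module defers the heavy framework imports (torch, tokenizers) until one of
# the names in `_import_structure` is first accessed, so a bare
# `import transformers` stays cheap even when optional backends are installed.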
| 36 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 0 |
"""Convert Wav2Vec2 Conformer checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
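
# Example invocation (sketch; the script filename and paths are placeholders):
#
#     python convert_wav2vec2_conformer_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer \
#         --dict_path /path/to/dict.ltr.txt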
| 37 |
"""AutoFeatureExtractor class."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
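
# Usage sketch (the checkpoint name is illustrative; not part of the original
# module):
#
#     from transformers import AutoFeatureExtractor
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     print(type(feature_extractor).__name__)  # -> Wav2Vec2FeatureExtractor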
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
"""Fine-tuning the library models for multiple choice."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
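
# Example invocation (sketch; argument values are illustrative):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --per_device_train_batch_size 16 \
#         --output_dir ./swag-output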
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
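
# Note: `search` returns the path as a list of (y, x) tuples from `init` to
# `goal`. Both variants visit each free cell at most a constant number of
# times (O(V + E) on the grid graph); the bidirectional search usually expands
# far fewer nodes because its two frontiers meet in the middle.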
| 318 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
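# Note (added): the two tests above check that FlaxLogitsProcessorList applies
# warpers and processors in sequence exactly like chaining them by hand, both
# eagerly and under jax.jit.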
| 40 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x))."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
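    # Added example (not in the original script; `swish` is the name given to
    # the second helper above): sigmoid(0) = 0.5 and swish(0) = 0 by definition.
    sample = np.array([-1.0, 0.0, 1.0])
    print("sigmoid:", sigmoid(sample))
    print("swish  :", swish(sample))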
| 318 | 0 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
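# Note (added): key() is the standard multi-heuristic A* priority,
# f_i(s) = g(s) + Wa * h_i(s), with one consistent heuristic (i = 0) anchoring
# the two inadmissible ones (i >= 1).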
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):  # mark the blocked cells
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
sys.exit()
def valid(p: TPos):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
while open_list[0].minkey() < float("""inf""" ):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_anchor.append(get_s)
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
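# Added illustration (not part of the original script): the PriorityQueue above
# supports insertion and re-prioritization via `put`, peeking with `minkey` and
# `top_show`, and popping with `get`, e.g.:
#
#     pq = PriorityQueue()
#     pq.put((0, 0), 5)
#     pq.put((1, 1), 2)
#     pq.put((0, 0), 1)          # re-prioritize an existing item
#     assert pq.minkey() == 1
#     assert pq.top_show() == (0, 0)
#     assert pq.get() == (1, (0, 0))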
| 41 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""", F"""encoder.layer.{i - 1}.""")

    if "conv_1." in name:
        name = name.replace('''conv_1.''', '''conv_stem.''')
    if ".block." in name:
        name = name.replace('''.block.''', '''.''')
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''', '''expand_1x1''')
    if "red_1x1" in name:
        name = name.replace('''red_1x1''', '''reduce_1x1''')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''', '''.conv_kxk.''')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''', '''.conv_1x1.''')
    if ".norm." in name:
        name = name.replace('''.norm.''', '''.normalization.''')
    if ".conv." in name:
        name = name.replace('''.conv.''', '''.convolution.''')
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''', '''.conv_projection.''')

    for i in range(0, 2):
        for j in range(0, 4):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""", F""".{i}.layer.{j}.""")

    for i in range(2, 6):
        for j in range(0, 4):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""", F""".{i}.""")
    if "expand_1x1" in name:
        name = name.replace('''expand_1x1''', '''downsampling_layer.expand_1x1''')
    if "conv_3x3" in name:
        name = name.replace('''conv_3x3''', '''downsampling_layer.conv_3x3''')
    if "reduce_1x1" in name:
        name = name.replace('''reduce_1x1''', '''downsampling_layer.reduce_1x1''')

    for i in range(2, 5):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""", '''.layernorm.weight''')
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""", '''.layernorm.bias''')
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''', '''.transformer.''')
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''', '''.layernorm_before.''')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''', '''.attention.output.dense.''')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''', '''.layernorm_after.''')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''', '''.intermediate.dense.''')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''', '''.output.dense.''')
    if ".transformer." in name:
        name = name.replace('''.transformer.''', '''.transformer.layer.''')
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''', '''.''')
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''', '''.''')
    if "seg_head." in name:
        name = name.replace('''seg_head.''', '''segmentation_head.''')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''', '''segmentation_head.classifier.''')

    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''', '''classifier.''')
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')

    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_'''):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization='''apple''')
        model.push_to_hub(model_name, organization='''apple''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
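
# Example invocation (added; illustrative only -- the checkpoint and output
# paths below are made-up placeholders, not files shipped with this script):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small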
| 318 | 0 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Find the day of the week for nearly any Gregorian date via Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')

    # Response
    response = F'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
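    # Example (added, illustrative): `python zellers_congruence.py 01-31-2010`
    # prints "Your date 01-31-2010, is a Sunday!" -- 2010-01-31 fell on a Sunday.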
| 42 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
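    # Added example (illustrative): one longest increasing subsequence of the
    # classic instance below is [10, 22, 33, 41, 60, 80].
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))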
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
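
# Usage note (added): with the lazy structure above, `from transformers import
# ElectraModel` triggers the torch-backed submodule import only on first access,
# while the TYPE_CHECKING branch keeps static type checkers fully informed.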
| 43 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
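
# Usage note (added): constructing OwlViTFeatureExtractor still works but emits
# the FutureWarning above; new code should instantiate OwlViTImageProcessor
# directly, which exposes the same preprocessing API.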
| 318 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # exact GELU: x * Phi(x), with Phi the standard normal CDF computed via erf
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    # tanh approximation of GELU
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeffa = tf.cast(0.044715, x.dtype)
    coeffa_a = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeffa_a * (1.0 + coeffa * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU with values clipped to [-10, 10], useful for quantization
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input in two along `axis` and gate one half
    # with the sigmoid of the other
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACTaFN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
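

# Added example (not part of the original module): a quick numerical sanity
# check of the lookup helper and two GELU variants defined above.
if __name__ == "__main__":
    sample = tf.constant([-1.0, 0.0, 1.0])
    print(get_tf_activation("gelu")(sample).numpy())      # exact erf-based GELU
    print(get_tf_activation("gelu_new")(sample).numpy())  # tanh approximation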
| 44 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub info using the given token."""
    headers = {
        '''Authorization''': F"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
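# Example output (added; illustrative only -- the fields depend on the account):
#
#   login: octocat
#   id: 583231
#   ...one line per field of the authenticated-user payload.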
| 318 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, '''dpr_tokenizer''')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        # BART tok
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        bart_tokenizer_path = os.path.join(self.tmpdirname, '''bart_tokenizer''')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer'''))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, '''rag_tokenizer''')
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
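
# Note (added): the two slow tests above exercise tokenization only, so they run
# without a retriever index; RagTokenizer simply dispatches to its DPR
# question-encoder tokenizer and its BART generator tokenizer.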
| 45 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter; overrides forward to add the adapter output."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
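
# Note (added): LoRALayer freezes nothing by itself. The training test further
# below first freezes the base model's parameters and then swaps the attention
# q/k/v projections for LoRALayer wrappers, so only the two adapter linears
# receive gradients.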
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map='''auto'''
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map='''auto''')
    def tearDown(self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config

        self.assertTrue(hasattr(config, '''quantization_config'''))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def test_linear_are_4bit(self):
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        output_sequences = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=1_0)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map='''auto''')

        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=1_0)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        # assumption: saving a 4-bit model raised NotImplementedError in this version
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()
        # assumption: passing both a quantization_config and load_in_abit raises ValueError
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=config, load_in_abit=True, device_map='''auto''', bnb_abit_quant_type='''nf4''', )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to('''cpu''')

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device('''cuda:0'''))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=1_0)

        # Check this does not throw an error
        _ = self.model_fpaa.to('''cpu''')

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()
    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''', load_in_abit=True, device_map='''auto''')
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls.model_name = '''t5-small'''
        cls.dense_act_model_name = '''google/flan-t5-small'''  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = '''Translate in German: Hello, my dog is cute'''
    def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map='''auto''')

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = '''bigscience/bloom-560m'''
        self.seq_to_seq_name = '''t5-small'''

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map='''auto''')
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map='''auto''')
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map='''auto''')
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map='''auto''')
    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        # self.pipe is assigned here (not in setUp) so tearDown can free the quantized model
        self.pipe = pipeline(
            '''text-generation''', model=self.model_name, model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['''generated_text'''], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map='''balanced''')

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = '''facebook/opt-350m'''
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version('''bitsandbytes''')) < version.parse('''0.37.0'''):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=1_6)
                module.k_proj = LoRALayer(module.k_proj, rank=1_6)
                module.v_proj = LoRALayer(module.v_proj, rank=1_6)

        # Step 3: dummy batch
        batch = self.tokenizer('''Test batch ''', return_tensors='''pt''').to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 318 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            """states""": states,
            """actions""": actions,
            """rewards""": rewards,
            """returns_to_go""": returns_to_go,
            """timesteps""": timesteps,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = DecisionTransformerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_autoregressive_prediction( self ):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                _ , action_pred , _ = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , _ , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
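# Minimal sketch (illustrative numbers, not from the test) of the return-to-go
# bookkeeping the rollout above relies on: starting from a target return, each
# observed reward is subtracted to form the conditioning value for the next step.
target_return, rewards_seen = 10.0, [1.0, 1.0]
rtg = [target_return]
for r in rewards_seen:
    rtg.append(rtg[-1] - r)
assert rtg == [10.0, 9.0, 8.0]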
| 46 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/rembert''': 256,
}
SPIECE_UNDERLINE = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
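# The special-token layout the methods above implement, sketched with hypothetical
# ids (cls=101, sep=102): a pair becomes [CLS] A [SEP] B [SEP], with token_type_ids
# 0 over the first segment (CLS, A and the first SEP) and 1 over the rest.
cls_, sep = [101], [102]
a, b = [7, 8], [9]
ids = cls_ + a + sep + b + sep
type_ids = [0] * len(cls_ + a + sep ) + [1] * len(b + sep )
assert ids == [101, 7, 8, 102, 9, 102] and type_ids == [0, 0, 0, 0, 1, 1]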
| 318 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__ ( unittest.TestCase ):
@property
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_a )
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.dummy_uncond_unet
_SCREAMING_SNAKE_CASE =DDIMScheduler()
_SCREAMING_SNAKE_CASE =self.dummy_vq_model
_SCREAMING_SNAKE_CASE =LDMPipeline(unet=_a , vqvae=_a , scheduler=_a )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =ldm(generator=_a , num_inference_steps=2 , output_type='numpy' ).images
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =ldm(generator=_a , num_inference_steps=2 , output_type='numpy' , return_dict=_a )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_SCREAMING_SNAKE_CASE =1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A__ ( unittest.TestCase ):
def A ( self : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =ldm(generator=_a , num_inference_steps=5 , output_type='numpy' ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_SCREAMING_SNAKE_CASE =np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_SCREAMING_SNAKE_CASE =1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
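# The numerical pattern both tests above use, distilled: compare a small slice of the
# generated image against pinned reference values with a max absolute-difference bound,
# loosened on Apple MPS where kernels are less deterministic.
def close_enough(actual , expected , tolerance ):
    return np.abs(np.asarray(actual ) - np.asarray(expected ) ).max() < tolerance
assert close_enough([0.44, 0.45] , [0.4405, 0.4495] , 1e-2 )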
| 47 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
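# The processor contract the tests above exercise, in miniature (hypothetical
# stand-ins, not the real classes): text goes to the tokenizer, images go to the
# image processor, and the two output dicts are merged; no input at all is an error.
def mini_processor(tokenize , featurize , text=None , images=None ):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images." )
    out = {}
    if text is not None:
        out.update(tokenize(text ) )
    if images is not None:
        out.update(featurize(images ) )
    return out
assert mini_processor(lambda t: {"input_ids": [1]} , lambda i: {"pixel_values": [0]} , "hi" , object() ) == {
    "input_ids": [1],
    "pixel_values": [0],
}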
| 318 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer ):
    '''simple docstring'''

    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=125 , additional_special_tokens=None , **kwargs , ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # UTF-8 is byte-level: 256 possible byte values
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size( self ) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids: List[int] ) -> List[int]:
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def _tokenize( self , text ) -> List[str]:
        tokens = [chr(i ) for i in text.encode("utf-8" )]
        return tokens
    def _convert_token_to_id( self , token ):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8" )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode("utf-8" , errors="ignore" )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        return ()
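# A self-contained sketch of the byte<->id scheme implemented above: ids 0-2 are
# pad/eos/unk, so each UTF-8 byte b maps to id b + 3 (names below are illustrative,
# not the tokenizer's API).
NUM_SPECIAL = 3
def byte_ids(text ):
    return [b + NUM_SPECIAL for b in text.encode("utf-8" )]
def byte_text(ids ):
    return bytes(i - NUM_SPECIAL for i in ids if i >= NUM_SPECIAL ).decode("utf-8" , errors="ignore" )
assert byte_text(byte_ids("héllo" ) ) == "héllo"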
| 48 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def shift_tokens_right ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
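# Illustrative trace of shift_tokens_right above: with decoder_start_token_id=0 and
# pad_token_id=1, labels [[5, -100, -100]] shift right to [[0, 5, -100]], and the
# jnp.where then maps the ignored -100 label to the pad id, giving [[0, 5, 1]].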
class FlaxMTaModel ( FlaxTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
| 318 | 0 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any ( num , base ):
    if isinstance(num , float ):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if isinstance(base , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
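# Worked trace of the loop above: decimal_to_any(255, 16) peels digits least-significant
# first -- divmod(255, 16) = (15, 15) -> 'F', then divmod(15, 16) = (0, 15) -> 'F' --
# and the final reversal yields "FF"; int("FF", 16) == 255 closes the round trip.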
| 49 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """YituTech/conv-bert-base""": 512,
    """YituTech/conv-bert-medium-small""": 512,
    """YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    """YituTech/conv-bert-base""": {"""do_lower_case""": True},
    """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
    """YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ConvBertTokenizer
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Tuple="[UNK]" , UpperCAmelCase : Optional[int]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : int=None , **UpperCAmelCase : Union[str, Any] , ) -> List[str]:
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ : Union[str, Any] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) )
lowerCamelCase__ : Dict = do_lower_case
lowerCamelCase__ : Dict = strip_accents
lowerCamelCase__ : Union[str, Any] = tokenize_chinese_chars
lowerCamelCase__ : Tuple = normalizer_class(**UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = do_lower_case
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any=None ) -> Dict:
lowerCamelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : List[Any] = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
lowerCamelCase__ : List[Any] = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 50 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible ( num ) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution ( n = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
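# Quick check of the property on the classic example pandigital 1406357289:
# d2d3d4=406 | 2, d3d4d5=063 | 3, d4d5d6=635 | 5, d5d6d7=357 | 7, d6d7d8=572 | 11,
# d7d8d9=728 | 13 and d8d9d10=289 | 17.
assert all(int("1406357289"[i + 1 : i + 4] ) % p == 0 for i, p in enumerate([2, 3, 5, 7, 11, 13, 17] ) )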
if __name__ == "__main__":
print(f'{solution() = }')
| 318 | 0 |
from __future__ import annotations
class IIRFilter :
    def __init__( self , order: int ):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs: list[float] , b_coeffs: list[float] ):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample: float ):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
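# A short usage sketch (illustrative coefficients, not from any test): the choice
# below gives the first-order smoother y[n] = 0.5*x[n] + 0.5*y[n-1].
if __name__ == "__main__":
    filt = IIRFilter(1 )
    filt.set_coefficients([1.0, -0.5] , [0.5, 0.0] )
    print([round(filt.process(1.0 ) , 4 ) for _ in range(4 )] )  # step response: 0.5, 0.75, 0.875, ...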
| 51 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
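# WordPiece in miniature, matching the vocab fixture above: greedy longest-match from
# the left, with non-initial pieces prefixed by "##" (a sketch, not the real tokenizer).
def wordpiece(word , vocab ):
    pieces , start = [] , 0
    while start < len(word ):
        end = len(word )
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece )
                break
            end -= 1
        else:
            return ["[UNK]"]
        start = end
    return pieces
assert wordpiece("unwanted" , {"un", "##want", "##ed"} ) == ["un", "##want", "##ed"]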
| 318 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowerCamelCase : List[str] = logging.getLogger(__name__)
class A__ ( __snake_case ):
def __init__( self , A_=-1 ):
'''simple docstring'''
UpperCamelCase : Dict = label_idx
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase : List[Any] = mode.value
UpperCamelCase : Dict = os.path.join(A_ , F"""{mode}.txt""" )
UpperCamelCase : Dict = 1
UpperCamelCase : List[str] = []
with open(A_ , encoding="utf-8" ) as f:
UpperCamelCase : Dict = []
UpperCamelCase : Tuple = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=A_ , labels=A_ ) )
guid_index += 1
UpperCamelCase : Dict = []
UpperCamelCase : Optional[Any] = []
else:
UpperCamelCase : str = line.split(" " )
words.append(splits[0] )
if len(A_ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=A_ , labels=A_ ) )
return examples
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(A_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCamelCase : Tuple = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(A_ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if path:
with open(A_ , "r" ) as f:
UpperCamelCase : Union[str, Any] = f.read().splitlines()
if "O" not in labels:
UpperCamelCase : Dict = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A__ ( __snake_case ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if path:
with open(A_ , "r" ) as f:
UpperCamelCase : Dict = f.read().splitlines()
if "O" not in labels:
UpperCamelCase : Optional[Any] = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A__ ( __snake_case ):
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase : str = mode.value
UpperCamelCase : List[Any] = os.path.join(A_ , F"""{mode}.txt""" )
UpperCamelCase : Tuple = 1
UpperCamelCase : Dict = []
with open(A_ , encoding="utf-8" ) as f:
for sentence in parse_incr(A_ ):
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : Optional[Any] = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(A_ ) == len(A_ )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=A_ , labels=A_ ) )
guid_index += 1
return examples
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = 0
for sentence in parse_incr(A_ ):
UpperCamelCase : int = preds_list[example_id]
UpperCamelCase : Tuple = ""
for token in sentence:
out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(A_ )
example_id += 1
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if path:
with open(A_ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
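# Shape of the CoNLL-style input the readers above consume, in miniature: one
# "token label" pair per line, blank lines separating sentences (the label column
# is picked by label_idx). Hypothetical two-sentence sample:
sample = "EU B-ORG\nrejects O\n\nGerman B-MISC\n"
sentences , words , labels = [] , [] , []
for line in sample.splitlines():
    if not line.strip():
        if words:
            sentences.append((words, labels) )
            words , labels = [] , []
        continue
    w , l = line.split()
    words.append(w )
    labels.append(l )
if words:
    sentences.append((words, labels) )
assert sentences[0] == (["EU", "rejects"], ["B-ORG", "O"] )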
| 52 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
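# The update() contract tested above, in miniature (a hypothetical stand-in class,
# not GenerationConfig itself): known attributes are set in place and unknown
# kwargs are handed back to the caller.
class _TinyConfig:
    def __init__(self ):
        self.max_new_tokens = 20
    def update(self , **kwargs ):
        unused = {}
        for k, v in kwargs.items():
            if hasattr(self , k ):
                setattr(self , k , v )
            else:
                unused[k] = v
        return unused
_c = _TinyConfig()
assert _c.update(max_new_tokens=1_024 , foo="bar" ) == {"foo": "bar"} and _c.max_new_tokens == 1_024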
| 318 | 0 |
'''simple docstring'''
import os
def solution ( filename: str = "matrix.txt" ) -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(',' )] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
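# Self-contained check of the DP recurrence used above -- dp[i][j] = grid[i][j] +
# min(dp[i-1][j], dp[i][j-1]), with the first row and column accumulated -- on a
# hypothetical 3x3 grid whose best right/down path costs 1+3+1+1+1 = 7.
_g = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
_dp = [row[:] for row in _g]
for _i in range(3 ):
    for _j in range(3 ):
        if _i or _j:
            _dp[_i][_j] = _g[_i][_j] + min(
                _dp[_i - 1][_j] if _i else float("inf" ) , _dp[_i][_j - 1] if _j else float("inf" ) )
assert _dp[-1][-1] == 7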
if __name__ == "__main__":
print(f'{solution() = }')
| 53 |
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''Sigmoid activation: 1 / (1 + e^(-value)).'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''Derivative of the sigmoid, expressed in terms of the sigmoid output value.'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
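# Illustrative sketch (helper names introduced here, not part of the class
# above): a compact reference of the same 3 -> 4 -> 3 -> 1 forward pass with
# descriptive names, handy for sanity-checking layer shapes.
def _reference_forward(inputs, weights_a, weights_b, weights_c):
    hidden_a = sigmoid(numpy.dot(inputs, weights_a))    # (n, 3) @ (3, 4) -> (n, 4)
    hidden_b = sigmoid(numpy.dot(hidden_a, weights_b))  # (n, 4) @ (4, 3) -> (n, 3)
    return sigmoid(numpy.dot(hidden_b, weights_c))      # (n, 3) @ (3, 1) -> (n, 1)


if __name__ == "__main__":
    _rng = numpy.random.default_rng(0)
    _out = _reference_forward(
        _rng.random((8, 3)), _rng.random((3, 4)), _rng.random((4, 3)), _rng.random((3, 1))
    )
    assert _out.shape == (8, 1)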
| 318 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ():
'''simple docstring'''
return 1
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ = 200 ):
'''simple docstring'''
return two_pound(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(input().strip())))
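# Illustrative cross-check (name introduced here): the recursive helpers above
# count coin combinations by cascading through denominations; the classic
# bottom-up coin-change table computes the same count iteratively.
def _count_combinations(target: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target  # ways[v] = number of ways to make value v
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]


# _count_combinations(200) == 73682, matching solution(200).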
| 54 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# Plain asserts (no messages, no self.assertEqual) give clearer tracebacks here
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
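# Illustrative usage sketch (assumes network access to the Hugging Face Hub;
# not part of the test suite): outside the harness the tokenizer under test
# behaves like any other transformers tokenizer.
#
#     tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#     ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
#     # per the integration test above: [413, 615, 114, 2291, ...] ending in eos id 1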
| 318 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __snake_case ( ):
lowerCamelCase_ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCAmelCase_ )
lowerCamelCase_ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=UpperCAmelCase_ )
env_command_parser(subparsers=UpperCAmelCase_ )
launch_command_parser(subparsers=UpperCAmelCase_ )
tpu_command_parser(subparsers=UpperCAmelCase_ )
test_command_parser(subparsers=UpperCAmelCase_ )
# Let's go
lowerCamelCase_ = parser.parse_args()
if not hasattr(UpperCAmelCase_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCAmelCase_ )
if __name__ == "__main__":
main()
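# Illustrative usage (assumes `accelerate` is installed on PATH): each
# sub-parser registered above corresponds to one CLI verb, e.g.
#
#     accelerate config          # interactive configuration wizard
#     accelerate env             # print environment information
#     accelerate launch train.py # launch a script with the saved config
#     accelerate test            # sanity-check the saved configuration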
| 55 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last processes each have only one neighbor, so they are
# created outside of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
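# Illustrative cross-check (name introduced here): a single-process
# odd-even transposition sort running the same n alternating phases,
# useful for verifying the multiprocessing version's output.
def _odd_even_sort_sequential(values: list) -> list:
    values = list(values)
    for phase in range(len(values)):
        for i in range(phase % 2, len(values) - 1, 2):
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
    return values


# _odd_even_sort_sequential(list(range(10, 0, -1))) == list(range(1, 11))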
| 318 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a : List[Any] = 'CompVis/stable-diffusion-v1-1'
a : Union[str, Any] = 'CompVis/stable-diffusion-v1-2'
a : Tuple = 'CompVis/stable-diffusion-v1-3'
a : Optional[Any] = 'CompVis/stable-diffusion-v1-4'
class a ( _lowerCamelCase ):
def __init__( self : Any , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ : StableDiffusionSafetyChecker , lowercase_ : CLIPImageProcessor , lowercase_ : bool = True , ):
super().__init__()
snake_case_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
snake_case_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
snake_case_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
snake_case_ = StableDiffusionPipeline(
vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , requires_safety_checker=lowercase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A_ ( self : Optional[int] ):
return {k: getattr(self , lowercase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def A_ ( self : List[Any] , lowercase_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def A_ ( self : Optional[int] ):
self.enable_attention_slicing(lowercase_ )
@torch.no_grad()
def A_ ( self : Any , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , **lowercase_ : Union[str, Any] , ):
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def A_ ( self : str , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , **lowercase_ : Optional[int] , ):
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def A_ ( self : Tuple , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , **lowercase_ : Tuple , ):
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def A_ ( self : Optional[Any] , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , **lowercase_ : int , ):
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def A_ ( self : Optional[int] , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , **lowercase_ : int , ):
snake_case_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowercase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
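# Illustrative usage sketch (hypothetical call shape; the mangled method names
# above stand in for per-checkpoint text-to-image entry points plus the
# combined no_grad call that returns all four results):
#
#     output = pipeline(prompt="an astronaut riding a horse",
#                       num_inference_steps=50, guidance_scale=7.5)
#     output.images  # [v1.1 result, v1.2 result, v1.3 result, v1.4 result]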
| 56 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
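# Illustrative invocation (script name and paths are placeholders):
#
#     python convert_xmod_checkpoint.py \
#         --xmod_checkpoint_path /path/to/model.pt \
#         --pytorch_dump_folder_path /path/to/output \
#         --classification_head   # only when converting an MNLI head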
| 318 | 0 |
"""simple docstring"""
A : List[Any] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = set()
# keep track of all the paths to be checked
__lowerCAmelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase = path[-1]
if node not in explored:
__lowerCAmelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase = list(_UpperCamelCase )
new_path.append(_UpperCamelCase )
queue.append(_UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_UpperCamelCase )
# in case there's no path between the 2 nodes
return []
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase = [start]
__lowerCAmelCase = set(_UpperCamelCase )
# Keep tab on distances from `start` node.
__lowerCAmelCase = {start: 0, target: -1}
while queue:
__lowerCAmelCase = queue.pop(0 )
if node == target:
__lowerCAmelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_UpperCamelCase )
queue.append(_UpperCamelCase )
__lowerCAmelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
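# Illustrative refinement sketch (name introduced here): list.pop(0) above is
# O(n) per dequeue; a collections.deque keeps each dequeue O(1) for larger graphs.
from collections import deque


def _bfs_shortest_distance(graph: dict, start: str, target: str) -> int:
    if start == target:
        return 0
    queue, seen = deque([(start, 0)]), {start}
    while queue:
        node, dist = queue.popleft()
        for neighbour in graph.get(node, []):
            if neighbour == target:
                return dist + 1
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append((neighbour, dist + 1))
    return -1


# _bfs_shortest_distance(demo_graph, "G", "D") == 4, matching the version above.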
| 57 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
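# Illustrative usage sketch (assumes the upstream `transformers` API, where
# this config class is exposed as CTRLConfig):
#
#     from transformers import CTRLConfig, CTRLModel
#     config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)  # tiny config for testing
#     model = CTRLModel(config)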
| 318 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 58 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(m ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger("""transformers.models.speecht5""")
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
hf_model.apply_weight_norm()
snake_case : Optional[Any] = checkpoint["input_conv.weight_g"]
snake_case : Union[str, Any] = checkpoint["input_conv.weight_v"]
snake_case : List[Any] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
snake_case : List[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""]
snake_case : Dict = checkpoint[f"""upsamples.{i}.1.weight_v"""]
snake_case : Dict = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case : Union[str, Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
snake_case : List[str] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
snake_case : Optional[int] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
snake_case : List[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
snake_case : List[str] = checkpoint["output_conv.1.weight_g"]
snake_case : Optional[int] = checkpoint["output_conv.1.weight_v"]
snake_case : int = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , ):
if config_path is not None:
snake_case : Any = SpeechTaHifiGanConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : List[Any] = SpeechTaHifiGanConfig()
snake_case : Tuple = SpeechTaHifiGan(__lowerCamelCase )
snake_case : Any = torch.load(__lowerCamelCase )
load_weights(orig_checkpoint["model"]["generator"] , __lowerCamelCase , __lowerCamelCase )
snake_case : int = np.load(__lowerCamelCase )
snake_case : List[str] = stats[0].reshape(-1 )
snake_case : Dict = stats[1].reshape(-1 )
snake_case : Optional[Any] = torch.from_numpy(__lowerCamelCase ).float()
snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).float()
model.save_pretrained(__lowerCamelCase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
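# Illustrative invocation (script name and paths are placeholders):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan \
#         --config_path config.json \
#         --push_to_hub username/speecht5-hifigan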
| 59 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type`
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
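# Illustrative usage sketch (assumes the upstream `transformers` API, where
# this class is exposed as AutoFeatureExtractor):
#
#     from transformers import AutoFeatureExtractor
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     # resolves to Wav2Vec2FeatureExtractor via the wav2vec2 entry in the mapping above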
| 318 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = 42
class snake_case_( a__ , a__ ):
__UpperCamelCase = 1
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 2_0_0_0 , UpperCamelCase_ : float = 0.15 , UpperCamelCase_ : float = 0.01 , UpperCamelCase_ : float = 1_348.0 , UpperCamelCase_ : float = 1E-5 , UpperCamelCase_ : int = 1 , ):
# standard deviation of the initial noise distribution
lowerCAmelCase : Union[str, Any] = sigma_max
# setable values
lowerCAmelCase : Dict = None
self.set_sigmas(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : float = None , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase : Dict = torch.linspace(1 , UpperCamelCase_ , UpperCamelCase_ , device=UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : float = None , UpperCamelCase_ : float = None , UpperCamelCase_ : float = None ):
lowerCAmelCase : Optional[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(UpperCamelCase_ ) , math.log(UpperCamelCase_ ) , UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCAmelCase : Optional[int] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase : Any = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be on the same device, so we use cpu as is the default with cuda
lowerCAmelCase : int = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase : Union[str, Any] = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase : Any = self.get_adjacent_sigma(UpperCamelCase_ , UpperCamelCase_ ).to(sample.device )
lowerCAmelCase : Union[str, Any] = torch.zeros_like(UpperCamelCase_ )
lowerCAmelCase : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase : List[str] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase : List[Any] = diffusion.unsqueeze(-1 )
lowerCAmelCase : int = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of the SDE
lowerCAmelCase : str = randn_tensor(
sample.shape , layout=sample.layout , generator=UpperCamelCase_ , device=sample.device , dtype=sample.dtype )
lowerCAmelCase : Optional[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=UpperCamelCase_ , prev_sample_mean=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=UpperCamelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCAmelCase : Tuple = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase : str = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase : Optional[int] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCAmelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCAmelCase : Any = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCAmelCase : str = step_size.unsqueeze(-1 )
lowerCAmelCase : Optional[Any] = sample + step_size * model_output
lowerCAmelCase : List[str] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
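# Illustrative predictor-corrector sampling loop for the scheduler above (a sketch, not part of the
# original file). `ScoreSdeVeScheduler`, `set_timesteps`/`set_sigmas`, `config.correct_steps`,
# `config.sigma_max`, and the `score_model` callable are assumptions inferred from the visible API.
#
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=1000)
# scheduler.set_sigmas(num_inference_steps=1000)
# sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
# for t in scheduler.timesteps:
#     # corrector: a few Langevin steps at the current noise level
#     for _ in range(scheduler.config.correct_steps):
#         model_output = score_model(sample, t)
#         sample = scheduler.step_correct(model_output, sample).prev_sample
#     # predictor: one reverse-SDE step toward lower noise
#     model_output = score_model(sample, t)
#     sample = scheduler.step_pred(model_output, t, sample).prev_sample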
"""Fine-tuning the library models for multiple choice (e.g. the SWAG dataset)."""

import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that will dynamically pad the inputs for multiple choice."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
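# Illustrative use of the collator above (not part of the original script). The toy feature dicts
# are hypothetical, but the shapes match what `__call__` expects: one dict per example, with each
# tokenizer field holding `num_choices` candidate sequences.
#
# collator = DataCollatorForMultipleChoice(tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"))
# features = [
#     {"input_ids": [[101, 102]] * 4, "attention_mask": [[1, 1]] * 4, "label": 2},
#     {"input_ids": [[101, 7592, 102]] * 4, "attention_mask": [[1, 1, 1]] * 4, "label": 0},
# ]
# batch = collator(features)  # batch["input_ids"].shape == (2, 4, max_len); batch["labels"] == tensor([2, 0])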
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
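    # A worked example of the transform above (hypothetical strings, not from the dataset): one SWAG
    # row with context "A man sits down." and header "He" becomes four (context, "He <ending_i>")
    # pairs, is tokenized as a flat list of 4 sequences, and is re-grouped so every tokenizer column
    # holds one list of 4 sequences per example.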
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer,
        data_collator=data_collator, compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
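# Example invocation (hypothetical paths and hyperparameters, shown for illustration only):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --max_seq_length 384 \
#     --output_dir /tmp/swag_base \
#     --overwrite_output_dir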