import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a CLAP feature extractor: turns raw audio into (optionally fused) log-mel spectrograms.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary. The large mel filter banks are dropped,
        since they can be recomputed from the other hyperparameters.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """
        Computes the log-mel spectrogram of the provided `waveform` using a Hann window.
        """
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """
        Truncates or pads the waveform, then converts it to a (possibly fused) log-mel spectrogram.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Featurizes one or several sequence(s) of raw audio.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
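
# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the module above). It assumes
# the installed `transformers` package exposes this class as
# `ClapFeatureExtractor`; the shapes in the comments follow the defaults
# (48 kHz audio, 64 mel bins, "fusion" truncation).
#
#     from transformers import ClapFeatureExtractor
#     import numpy as np
#
#     extractor = ClapFeatureExtractor()
#     audio = np.random.randn(48_000)  # one second of fake mono audio
#     features = extractor(audio, sampling_rate=48_000, return_tensors="np")
#     print(features["input_features"].shape)  # e.g. (1, 4, 1001, 64): 4 fused mel views
#     print(features["is_longer"])             # [[False]]: shorter than max_length_s
# ---------------------------------------------------------------------------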
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of the VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
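
# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the module above), assuming the
# class is importable as `diffusers.VQModel`: a round trip through the default
# tiny autoencoder on a random image-shaped tensor.
#
#     import torch
#     from diffusers import VQModel
#
#     model = VQModel()                # defaults: 3 latent channels, 256 codebook entries
#     sample = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         reconstruction = model(sample).sample
#     print(reconstruction.shape)      # torch.Size([1, 3, 32, 32])
# ---------------------------------------------------------------------------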
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
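
# Example invocation (added illustration; the paths are placeholders, and the
# filename assumes the usual naming for this transformers conversion script):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/model.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin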
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# The original class name was lost in this dump; in diffusers the object guarded by the
# ["torch", "scipy"] backend pair is LMSDiscreteScheduler, so that name is assumed here.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
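
# How the dummy-object pattern behaves (added illustration): the class can always be
# imported, but any attempt to use it without the required backends raises an
# ImportError via `requires_backends`, e.g.
#
#     scheduler = LMSDiscreteScheduler()  # ImportError if torch or scipy is missing
#
# This lets the library expose a stable import surface regardless of which optional
# dependencies are installed.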
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


# The class name below is assumed from the community "DDIM noise comparative analysis"
# pipeline that this code matches; the original identifier was lost in this dump.
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
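
# ---------------------------------------------------------------------------
# Usage sketch (added illustration; the checkpoint name and file paths are
# placeholders, and the class name is the assumption noted above):
#
#     from PIL import Image
#
#     pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     init_image = Image.open("face.png")
#     images, timestep = pipe(init_image, strength=0.5, num_inference_steps=50, return_dict=False)
#     images[0].save(f"renoised_at_{timestep}.png")
# ---------------------------------------------------------------------------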
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
a__: List[str] = len(_SCREAMING_SNAKE_CASE ), len(grid[0] )
if (
min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
a__: Any = 0
count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
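    # A small worked example (added illustration): on an open 2x2 grid there are
    # exactly two self-avoiding paths from the top-left to the bottom-right
    # corner (right-then-down and down-then-right).
    print(depth_first_search([[0, 0], [0, 0]], row=0, col=0, visit=set()))  # prints 2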
| 370 | """simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. It extracts the text nodes of an HTML
    string, together with the xpath of each node, using Beautiful Soup.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
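
# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the module above), assuming the
# class is importable as `transformers.MarkupLMFeatureExtractor`:
#
#     from transformers import MarkupLMFeatureExtractor
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     html_string = "<html><body><p>Hello world</p></body></html>"
#     encoding = feature_extractor(html_string)
#     print(encoding["nodes"])   # [['Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/p']]
# ---------------------------------------------------------------------------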
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Returns True if the sink `t` is reachable from the source `s` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Finds the saturated edges of a minimum cut by pushing flow along BFS augmenting paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
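    # For the capacity matrix above, the BFS-based augmenting-path search
    # (the Edmonds-Karp flavour of Ford-Fulkerson) saturates the edges
    # (1, 3), (4, 3) and (4, 5), whose original capacities 12 + 7 + 4 = 23
    # equal the maximum flow, so the line above should print
    # [(1, 3), (4, 3), (4, 5)].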
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a Whisper feature extractor: pads/truncates raw audio to 30 s and
    converts it to an 80-bin log-mel spectrogram.
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """
        Computes the log-mel spectrogram of the provided waveform.
        """
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Featurizes one or several sequence(s) of raw audio.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary, dropping the large mel filter bank.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
def price_plus_tax(price: float, tax_rate: float) -> float:
    """
    >>> price_plus_tax(100, 0.25)
    125.0
    >>> price_plus_tax(125.50, 0.05)
    131.775
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Returns the minimum number of moves needed so every node of the binary tree
    holds exactly one coin, where one move transfers a coin between a parent
    and a child.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
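    # A small worked example (added illustration): a root holding no coins with a
    # left child holding 3 coins and a right child holding none needs 3 moves
    # (two coins travel left -> root, and one of them continues root -> right).
    print(distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0))))  # prints 3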
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url =model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict =torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val =state_dict.pop(key )
        state_dict[rename_key(key )] =val
    config =get_focalnet_config(model_name )
    model =FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
    url ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor =BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=[0.485, 0.456, 0.406] , image_std=[0.229, 0.224, 0.225] , )
    image =Image.open(requests.get(url , stream=True ).raw )
    inputs =processor(images=image , return_tensors='''pt''' )
    image_transforms =transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values =image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs =model(**inputs )
    predicted_class_idx =outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice =torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice =torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice =torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice =torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice =torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice =torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
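# A minimal usage sketch for a converted checkpoint. The local path is a
# hypothetical example, assuming the script above was run with
# --pytorch_dump_folder_path ./focalnet-tiny; it is not part of the script itself.
def demo_converted_focalnet(dump_path="./focalnet-tiny"):
    import requests
    import torch
    from PIL import Image

    from transformers import AutoImageProcessor, FocalNetForImageClassification

    processor = AutoImageProcessor.from_pretrained(dump_path)
    model = FocalNetForImageClassification.from_pretrained(dump_path)
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # map the winning logit back to a human-readable ImageNet label
    return model.config.id2label[logits.argmax(-1).item()]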
| 75 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    config_cls =XGLMConfig
    config_updates ={}
    hidden_act ='gelu'
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        """simple docstring"""
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_input_mask =use_input_mask
        self.use_labels =use_labels
        self.vocab_size =vocab_size
        self.hidden_size =d_model
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.ffn_dim =ffn_dim
        self.activation_function =activation_function
        self.activation_dropout =activation_dropout
        self.attention_dropout =attention_dropout
        self.max_position_embeddings =max_position_embeddings
        self.initializer_range =initializer_range
        self.scope =None
        self.bos_token_id =0
        self.eos_token_id =2
        self.pad_token_id =1
    def get_large_model_config( self ):
        """simple docstring"""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids =tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
        input_mask =None
        if self.use_input_mask:
            input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        config =self.get_config()
        head_mask =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ):
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes =(TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping =(
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx =False
    test_missing_keys =False
    test_pruning =False
    def setUp( self ):
        """simple docstring"""
        self.model_tester =TFXGLMModelTester(self )
        self.config_tester =ConfigTester(self, config_class=XGLMConfig, n_embd=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model =TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def test_resize_token_embeddings( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
    def test_lm_generate_xglm( self, verify_outputs=True ):
        """simple docstring"""
        model =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        input_ids =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids =model.generate(input_ids, do_sample=False, num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids )
@slow
    def test_xglm_sample( self ):
        """simple docstring"""
        tokenizer =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        model =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        tokenized =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
        input_ids =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            output_ids =model.generate(input_ids, do_sample=True, seed=[7, 0] )
        output_str =tokenizer.decode(output_ids[0], skip_special_tokens=True )
        expected_output_str =(
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str, expected_output_str )
@slow
    def test_batch_generation( self ):
        """simple docstring"""
        model =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tokenizer =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        tokenizer.padding_side ='''left'''
        # use different length sentences to test batching
        sentences =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        inputs =tokenizer(sentences, return_tensors='''tf''', padding=True )
        input_ids =inputs['''input_ids''']
        outputs =model.generate(input_ids=input_ids, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 )
        inputs_non_padded =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids
        output_non_padded =model.generate(input_ids=inputs_non_padded, max_new_tokens=12 )
        inputs_padded =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids
        output_padded =model.generate(input_ids=inputs_padded, max_new_tokens=12 )
        batch_out_sentence =tokenizer.batch_decode(outputs, skip_special_tokens=True )
        non_padded_sentence =tokenizer.decode(output_non_padded[0], skip_special_tokens=True )
        padded_sentence =tokenizer.decode(output_padded[0], skip_special_tokens=True )
        expected_output_sentence =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence )
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence] )
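# Why left padding matters for decoder-only generation (a standalone sketch, not
# part of the test class above): with right padding, pad tokens would sit between
# the prompt and the newly generated tokens, so batched decoder-only models such
# as XGLM pad on the left instead.
def pad_left(sequences, pad_token_id):
    """Left-pad a batch of token-id lists to a common length (pure Python)."""
    max_len = max(len(seq) for seq in sequences)
    return [[pad_token_id] * (max_len - len(seq)) + seq for seq in sequences]

# e.g. pad_left([[5, 6, 7], [9]], pad_token_id=0) -> [[5, 6, 7], [0, 0, 9]]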
| 75 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config =AlbertConfig.from_json_file(albert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model =AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
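# A minimal round-trip check for the converted weights (a sketch; both paths are
# hypothetical and must point at a real config file and conversion output):
def check_albert_dump(albert_config_file, pytorch_dump_path):
    config = AlbertConfig.from_json_file(albert_config_file)
    model = AlbertForPreTraining(config)
    # load_state_dict raises if the converted tensors do not match the architecture
    model.load_state_dict(torch.load(pytorch_dump_path, map_location="cpu"))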
| 351 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        '''simple docstring'''
        pipe =VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt ="A painting of a squirrel eating a burger "
        generator =torch.manual_seed(0 )
        image =pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe =VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator =generator.manual_seed(0 )
        new_image =pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img( self ):
        '''simple docstring'''
        pipe =VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt ="A painting of a squirrel eating a burger "
        generator =torch.manual_seed(0 )
        image =pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice =image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
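# The save/load equivalence pattern used above generalizes to any diffusers
# pipeline; a minimal sketch (the pipeline instance and prompt are supplied by
# the caller, so nothing here is specific to Versatile Diffusion):
def roundtrip_outputs_match(pipe, prompt, steps=2):
    generator = torch.manual_seed(0)
    before = pipe(prompt=prompt, generator=generator, num_inference_steps=steps, output_type="numpy").images
    with tempfile.TemporaryDirectory() as tmpdirname:
        pipe.save_pretrained(tmpdirname)
        reloaded = type(pipe).from_pretrained(tmpdirname)
    generator = torch.manual_seed(0)
    after = reloaded(prompt=prompt, generator=generator, num_inference_steps=steps, output_type="numpy").images
    return bool(np.abs(before - after).sum() < 1e-5)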
| 150 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path : str =field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] =field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] =field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] =field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer : bool =field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision : str =field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token : bool =field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class DataTrainingArguments :
    train_file : Optional[str] =field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file : Optional[str] =field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    overwrite_cache : bool =field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers : Optional[int] =field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_seq_length : Optional[int] =field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    pad_to_max_length : bool =field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    max_train_samples : Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples : Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        """simple docstring"""
        if self.train_file is not None:
            extension =self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension =self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
    tokenizer : PreTrainedTokenizerBase
    padding : Union[bool, str, PaddingStrategy] =True
    max_length : Optional[int] =None
    pad_to_multiple_of : Optional[int] =None
    def __call__( self , features ):
        """simple docstring"""
        label_name ='label' if 'label' in features[0].keys() else 'labels'
        labels =[feature.pop(label_name ) for feature in features]
        batch_size =len(features )
        num_choices =len(features[0]['input_ids'] )
        flattened_features =[
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features =list(chain(*flattened_features ) )
        batch =self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch ={k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] =torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
    '''simple docstring'''
    parser =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level =training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files ={}
        if data_args.train_file is not None:
            data_files['train'] =data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] =data_args.validation_file
        extension =data_args.train_file.split('.' )[-1]
        raw_datasets =load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets =load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names =[F'''ending{i}''' for i in range(4 )]
    context_name ='sent1'
    question_header_name ='sent2'
    if data_args.max_seq_length is None:
        max_seq_length =tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            max_seq_length =1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        max_seq_length =min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences =[[context] * 4 for context in examples[context_name]]
        question_headers =examples[question_header_name]
        second_sentences =[
            [F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences =list(chain(*first_sentences ) )
        second_sentences =list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples =tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset =raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples =min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset =train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset =train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset =raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples =min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset =eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset =eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    data_collator =(
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids =eval_predictions
        preds =np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer =Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        checkpoint =None
        if training_args.resume_from_checkpoint is not None:
            checkpoint =training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint =last_checkpoint
        train_result =trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics =train_result.metrics
        max_train_samples =(
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] =min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics =trainer.evaluate()
        max_eval_samples =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] =min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs ={
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
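# A quick illustration of what DataCollatorForMultipleChoice produces (a sketch;
# the tokenizer checkpoint is only an example, any fast tokenizer works):
def _collator_demo():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    features = []
    # two examples with two candidate endings each, shaped exactly like the
    # output of preprocess_function above: one list of token ids per choice
    for question, endings, label in [
        ("Paris is", ["a city", "a verb"], 0),
        ("Two plus two is", ["five", "four"], 1),
    ]:
        encoded = tokenizer([question] * len(endings), endings)
        features.append(
            {"input_ids": encoded["input_ids"], "attention_mask": encoded["attention_mask"], "label": label}
        )
    batch = DataCollatorForMultipleChoice(tokenizer=tokenizer)(features)
    # batch["input_ids"] has shape (batch_size, num_choices, seq_len)
    return batch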
| 199 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset ):
    def __init__( self ,dataset ,process ,params ):
        '''simple docstring'''
        self.dataset =dataset
        self.process =process
        self.params =params
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self ,i ):
        '''simple docstring'''
        item =self.dataset[i]
        processed =self.process(item ,**self.params )
        return processed
class PipelineIterator( IterableDataset ):
    def __init__( self ,loader ,infer ,params ,loader_batch_size=None ):
        '''simple docstring'''
        self.loader =loader
        self.infer =infer
        self.params =params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size =None
        self.loader_batch_size =loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index =None
        self._loader_batch_data =None
    def __len__( self ):
        '''simple docstring'''
        return len(self.loader )
    def __iter__( self ):
        '''simple docstring'''
        self.iterator =iter(self.loader )
        return self
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Convert ModelOutput to tuple first
_UpperCamelCase : str = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase : Optional[int] = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : Optional[Any] = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase : Union[str, Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCamelCase : Optional[int] = self._loader_batch_data.__class__(lowerCamelCase__ )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase : Tuple = next(self.iterator )
_UpperCamelCase : List[str] = self.infer(lowerCamelCase__ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : List[Any] = processed
else:
_UpperCamelCase : List[Any] = list(processed.keys() )[0]
_UpperCamelCase : Optional[int] = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = len(lowerCamelCase__ )
else:
_UpperCamelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : int = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase : Dict = processed
_UpperCamelCase : str = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class PipelineChunkIterator( PipelineIterator ):
    def __init__( self ,loader ,infer ,params ,loader_batch_size=None ):
        '''simple docstring'''
        super().__init__(loader ,infer ,params )
    def __iter__( self ):
        '''simple docstring'''
        self.iterator =iter(self.loader )
        self.subiterator =None
        return self
    def __next__( self ):
        '''simple docstring'''
        if self.subiterator is None:
            # Subiterator None means we haven't started yet
            self.subiterator =self.infer(next(self.iterator ) ,**self.params )
        try:
            # Try to return next item
            processed =next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator =self.infer(next(self.iterator ) ,**self.params )
            processed =next(self.subiterator )
        return processed
class PipelinePackIterator( PipelineIterator ):
    def __iter__( self ):
        '''simple docstring'''
        self.iterator =iter(self.loader )
        return self
    def __next__( self ):
        '''simple docstring'''
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last =False
        accumulator =[]
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item =self.loader_batch_item()
                is_last =item.pop('is_last' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed =self.infer(next(self.iterator ) ,**self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed ,torch.Tensor ):
                    first_tensor =processed
                else:
                    key =list(processed.keys() )[0]
                    first_tensor =processed[key]
                if isinstance(first_tensor ,list ):
                    observed_batch_size =len(first_tensor )
                else:
                    observed_batch_size =first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size =observed_batch_size
                self._loader_batch_data =processed
                self._loader_batch_index =0
                while self._loader_batch_index < self.loader_batch_size:
                    item =self.loader_batch_item()
                    is_last =item.pop('is_last' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item =processed
                is_last =item.pop('is_last' )
                accumulator.append(item )
        return accumulator
class KeyDataset( Dataset ):
    def __init__( self ,dataset: Dataset ,key: str ):
        '''simple docstring'''
        self.dataset =dataset
        self.key =key
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self ,i ):
        '''simple docstring'''
        return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
    def __init__( self ,dataset: Dataset ,key1: str ,key2: str ):
        '''simple docstring'''
        self.dataset =dataset
        self.key1 =key1
        self.key2 =key2
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self ,i ):
        '''simple docstring'''
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 83 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline ( Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs ={}
        preprocess_params ={}
        if prompt is not None:
            preprocess_params["prompt"] =prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] =max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image =load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type =self.model.config.model_type
            if model_type == "git":
                model_inputs =self.image_processor(images=image , return_tensors=self.framework )
                input_ids =self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids =[self.tokenizer.cls_token_id] + input_ids
                input_ids =torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs =self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs =self.image_processor(images=image , return_tensors=self.framework )
                text_inputs =self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs =self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] =None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] =None
        if generate_kwargs is None:
            generate_kwargs ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs =model_inputs.pop(self.model.main_input_name )
        model_outputs =self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records =[]
        for output_ids in model_outputs:
            record ={
                "generated_text": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
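# A minimal end-user sketch of this pipeline (the checkpoint is illustrative;
# any image-to-text model on the Hub works):
def _image_to_text_demo():
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    # returns a list of dicts like [{"generated_text": "..."}]
    return captioner("http://images.cocodataset.org/val2017/000000039769.jpg")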
| 364 |
"""simple docstring"""
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
        l =[]  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum =0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp =l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum =temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp =l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum =temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp =l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum =temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp =l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum =temp
    return maximum
if __name__ == "__main__":
print(solution())
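# The four loops above can be collapsed into one pass over direction vectors; a
# standalone sketch that works for any square grid and window length:
def solution_directions(grid, window=4):
    n = len(grid)
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, diagonal, anti-diagonal
        for i in range(n):
            for j in range(n):
                end_i, end_j = i + di * (window - 1), j + dj * (window - 1)
                if 0 <= end_i < n and 0 <= end_j < n:
                    product = 1
                    for step in range(window):
                        product *= grid[i + di * step][j + dj * step]
                    best = max(best, product)
    return best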
| 161 | 0 |
class EditDistance :
    def __init__( self ):
        '''simple docstring'''
        self.word1 =""
        self.word2 =""
        self.dp =[]
    def __min_dist_top_down_dp( self ,m ,n ):
        '''simple docstring'''
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] =self.__min_dist_top_down_dp(m - 1 ,n - 1 )
            else:
                insert =self.__min_dist_top_down_dp(m ,n - 1 )
                delete =self.__min_dist_top_down_dp(m - 1 ,n )
                replace =self.__min_dist_top_down_dp(m - 1 ,n - 1 )
                self.dp[m][n] =1 + min(insert ,delete ,replace )
        return self.dp[m][n]
    def min_dist_top_down( self ,word1 ,word2 ):
        '''simple docstring'''
        self.word1 =word1
        self.word2 =word2
        self.dp =[[-1 for _ in range(len(word2 ) )] for _ in range(len(word1 ) )]
        return self.__min_dist_top_down_dp(len(word1 ) - 1 ,len(word2 ) - 1 )
    def min_dist_bottom_up( self ,word1 ,word2 ):
        '''simple docstring'''
        self.word1 =word1
        self.word2 =word2
        m =len(word1 )
        n =len(word2 )
        self.dp =[[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] =j
                elif j == 0:  # second string is empty
                    self.dp[i][j] =i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] =self.dp[i - 1][j - 1]
                else:
                    insert =self.dp[i][j - 1]
                    delete =self.dp[i - 1][j]
                    replace =self.dp[i - 1][j - 1]
                    self.dp[i][j] =1 + min(insert ,delete ,replace )
        return self.dp[m][n]
if __name__ == "__main__":
    solver =EditDistance()
    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()
    S1 =input("""Enter the first string: """).strip()
    S2 =input("""Enter the second string: """).strip()
    print()
    print(F'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 20 |
def neville_interpolate(x_points: list ,y_points: list ,x0: int ) -> list:
    """simple docstring"""
    n =len(x_points )
    q =[[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] =y_points[i]
    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] =(
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
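# Worked example (a sketch using the function above): interpolating y = 2x from
# four samples is exact for a polynomial interpolant, so evaluating at x0 = 5
# returns 10.0 as the first element of the result pair:
def _neville_demo():
    value, table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
    assert abs(value - 10.0) < 1e-9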
| 306 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class CodeQualityTest ( TestCase ):
    """simple docstring"""
    def _no_encoding_on_file_open( self , filepath ):
        '''simple docstring'''
        with open(filepath , encoding='utf-8') as input_file:
            regexp =re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text =input_file.read()
            match =regexp.search(input_text)
        return match
    def _no_print_statements( self , filepath ):
        '''simple docstring'''
        with open(filepath , encoding='utf-8') as input_file:
            regexp =re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
            input_text =input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches =regexp.finditer(input_text)
        matches =[match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        '''simple docstring'''
        dataset_paths =Path('./datasets')
        dataset_files =list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')
    def test_no_print_statements( self ):
        '''simple docstring'''
        dataset_paths =Path('./datasets')
        dataset_files =list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
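# What the first regex flags, illustrated on three one-line examples (a
# standalone sketch reusing the same pattern as _no_encoding_on_file_open):
def _open_regex_demo():
    regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
    assert regexp.search(' open("data.txt")') is not None  # flagged: no encoding argument
    assert regexp.search(' open("data.txt", encoding="utf-8")') is None  # explicit encoding is fine
    assert regexp.search(' open("data.bin", "rb")') is None  # binary modes are exempt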
| 225 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_attention_mask =use_attention_mask
        self.use_token_type_ids =use_token_type_ids
        self.use_labels =use_labels
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.type_vocab_size =type_vocab_size
        self.type_sequence_label_size =type_sequence_label_size
        self.initializer_range =initializer_range
        self.num_choices =num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask =None
        if self.use_attention_mask:
            attention_mask =random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config =BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        inputs_dict ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        config.is_decoder =True
        encoder_hidden_states =floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_head_masking =True
    all_model_classes =(
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester =FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model =FlaxBertModel.from_pretrained('bert-base-cased')
        outputs =model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
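# A minimal forward-pass sketch for the model exercised above (a standalone
# example; it downloads the bert-base-cased checkpoint on first use):
def _flax_bert_demo():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    model = FlaxBertModel.from_pretrained('bert-base-cased')
    inputs = tokenizer('Hello, world!', return_tensors='np')
    # last_hidden_state has shape (batch, seq_len, hidden_size=768)
    return model(**inputs).last_hidden_state.shape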
| 225 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance: float , capacitance: float ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
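# The underlying formula is f = 1 / (2 * pi * sqrt(L * C)); a quick check (a
# sketch, with values chosen arbitrarily):
def _resonant_frequency_demo():
    label, frequency = resonant_frequency(10, 5)
    assert label == "Resonant frequency"
    assert abs(frequency - 1 / (2 * pi * sqrt(10 * 5))) < 1e-12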
| 198 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 198 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 187 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
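# Worked check: 585 is a double-base palindrome, since "585" reads the same
# backwards and bin(585) == "0b1001001001" is palindromic too.
assert is_palindrome(585)
assert is_palindrome(bin(585).split("b")[1])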
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 187 | 1 |
"""simple docstring"""
class lowerCamelCase :
'''simple docstring'''
def __init__(self ):
"""simple docstring"""
UpperCAmelCase__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
UpperCAmelCase__ : int = False
def _a (self , _lowerCamelCase ):
"""simple docstring"""
for word in words:
self.insert(_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = self
for char in word:
if char not in curr.nodes:
UpperCAmelCase__ : Optional[int] = TrieNode()
UpperCAmelCase__ : int = curr.nodes[char]
UpperCAmelCase__ : int = True
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self
for char in word:
if char not in curr.nodes:
return False
UpperCAmelCase__ : Tuple = curr.nodes[char]
return curr.is_leaf
def _a (self , _lowerCamelCase ):
"""simple docstring"""
def _delete(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
if index == len(_lowerCamelCase ):
# If word does not exist
if not curr.is_leaf:
return False
UpperCAmelCase__ : List[Any] = False
return len(curr.nodes ) == 0
UpperCAmelCase__ : List[Any] = word[index]
UpperCAmelCase__ : Union[str, Any] = curr.nodes.get(_lowerCamelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
UpperCAmelCase__ : Dict = _delete(_lowerCamelCase , _lowerCamelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _lowerCamelCase , 0 )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
if node.is_leaf:
print(lowerCAmelCase , end=""" """ )
for key, value in node.nodes.items():
print_words(lowerCAmelCase , word + key )
def a__ ( ) -> bool:
UpperCAmelCase__ : Union[str, Any] = """banana bananas bandana band apple all beast""".split()
UpperCAmelCase__ : Dict = TrieNode()
root.insert_many(lowerCAmelCase )
# print_words(root, "")
assert all(root.find(lowerCAmelCase ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
print(str(lowerCAmelCase ) , """works!""" if passes else """doesn't work :(""" )
def a__ ( ) -> None:
assert test_trie()
def a__ ( ) -> None:
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
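# find() above is a whole-word lookup: it checks is_leaf at the final node,
# so a stored word's bare prefix is not a match. A small sketch:
def _demo_prefix_vs_word() -> None:
    root = TrieNode()
    root.insert("band")
    assert root.find("band")  # complete word -> True
    assert not root.find("ban")  # bare prefix -> False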
| 171 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_35_15_63,
"""num_examples""": 1_00_00,
},
{
"""name""": """validation""",
"""num_bytes""": 23_84_18,
"""num_examples""": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
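# For reference, the command exercised above corresponds roughly to this CLI
# invocation (a sketch; the flag names mirror the _TestCommandArgs fields):
#
#   datasets-cli test <path/to/dataset_script> --save_infos --all_configs
#
# which regenerates the metadata compared against `expected_dataset_infos`.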
| 171 | 1 |
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality check up to sqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime expressible as a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
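# The candidates above are exactly the differences of consecutive cubes,
# (k + 1)**3 - k**3 = 3*k*k + 3*k + 1, i.e. 7, 19, 37, 61, 91, ...
# A quick consistency check of that recurrence; for Project Euler 131 the
# published count below one million is 173 (stated as a reference point,
# not re-verified here).
assert [(k + 1) ** 3 - k**3 for k in range(1, 5)] == [7, 19, 37, 61]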
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 365 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 210 | 0 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_lowercase: int = TypeVar("_T")
class _lowercase ( Generic[_T] ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ = None ):
"""simple docstring"""
a = list(iterable or [] )
a = []
def __len__(self ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__(self ):
"""simple docstring"""
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
self._stacka.append(_snake_case )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self._stacka.pop
a = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
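# Usage sketch: put() pushes onto the in-stack, get() lazily drains it into
# the out-stack, so each element moves at most once and the FIFO operations
# are amortized O(1).
def _demo_queue() -> None:
    queue = QueueByTwoStacks([10, 20])
    queue.put(30)
    assert len(queue) == 3
    assert queue.get() == 10  # the oldest element leaves first
    assert queue.get() == 20
    assert queue.get() == 30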
| 227 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
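# Example shell usage of the subcommand registered above (a sketch; the flags
# mirror the add_argument calls in register_subcommand):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force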
| 82 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
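# Minimal sampling-loop sketch for the scheduler above. `denoise_fn` and
# `params` are hypothetical stand-ins for a trained Flax model (this is not
# the diffusers pipeline API verbatim). The pattern follows the stochastic
# Karras et al. (2022) sampler: churn noise into the sample, take an Euler
# step, then apply the second-order correction while sigma_prev != 0.
def karras_ve_sample_sketch(scheduler, denoise_fn, params, sample, key, num_inference_steps=50):
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0

        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        model_output = denoise_fn(params, sample_hat, sigma_hat)
        output = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)
        sample, derivative = output.prev_sample, output.derivative

        if sigma_prev != 0:
            model_output = denoise_fn(params, sample, sigma_prev)
            output = scheduler.step_correct(
                state, model_output, sigma_hat, sigma_prev, sample_hat, sample, derivative
            )
            sample = output.prev_sample
    return sample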
| 170 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase_ : Dict = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase_ : Tuple = parser.parse_args()
lowerCAmelCase_ : Union[str, Any] = '''cpu'''
lowerCAmelCase_ : List[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase_ : List[Any] = '''path-to-your-trained-model'''
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase_ : str = pipe.to(device)
# to channels last
lowerCAmelCase_ : Dict = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase_ : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase_ : Optional[int] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase_ : Any = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase_ : str = torch.randn(2, 4, 64, 64)
lowerCAmelCase_ : str = torch.rand(1) * 999
lowerCAmelCase_ : Any = torch.randn(2, 77, 768)
lowerCAmelCase_ : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase_ : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase_ : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase_ : Dict = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase_ : Optional[int] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase_ : Optional[int] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase_ : str = 666
lowerCAmelCase_ : int = torch.Generator(device).manual_seed(seed)
lowerCAmelCase_ : Dict = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase_ : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase_ : Tuple = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 170 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
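# Standalone sketch of the composition pattern exercised above: several
# warpers chained into one FlaxLogitsProcessorList and applied in a single
# call (the values are illustrative).
def _demo_processor_chain():
    input_ids = ids_tensor((2, 4), vocab_size=10)
    scores = jnp.log(jnp.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
    processor = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3), FlaxTopPLogitsWarper(0.9)]
    )
    return processor(input_ids, scores, cur_len=4)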
| 90 |
'''simple docstring'''
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
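# Sanity check: the rolling one-row update above folds Pascal's rule
# C(i, j) = C(i-1, j) + C(i-1, j-1) into a single O(r) array; compare with
# the stdlib closed form.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252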
| 70 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
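# Small self-check (not run on import): each random round above rules out at
# least 3/4 of odd composites, so `prec` rounds leave a false-positive chance
# of at most (1/4) ** prec.
def _demo_is_prime_big() -> None:
    small_primes = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29}
    for candidate in range(2, 30):
        assert is_prime_big(candidate) == (candidate in small_primes)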
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 232 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = """To ensure a smooth flow of bank resolutions."""
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration( self ):
        # fmt: off
__lowerCAmelCase : Optional[Any] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )

    def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        test_string = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(test_string ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , ) | 232 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : List[str] = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 |
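# The block above routes imports through transformers' internal _LazyModule.
# A minimal standard-library sketch of the same deferred-import idea (PEP 562
# module-level __getattr__); the attribute and module names are illustrative.
import importlib

_LAZY_ATTRS = {"LayoutXLMProcessor": ".processing_layoutxlm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")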
from __future__ import annotations
def solve_maze( maze : list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved
def run_maze( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # explore all four directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0  # backtrack
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
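# A quick usage sketch for the solver above: 0 marks a free cell, 1 a wall.
# The grid below has a single path along the top row and the right column.
if __name__ == "__main__":
    demo_maze = [
        [0, 0, 0],
        [1, 1, 0],
        [1, 1, 0],
    ]
    solve_maze(demo_maze)  # prints the visited cells as a 0/1 grid, returns True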
'''simple docstring'''
from __future__ import annotations
def check_polygon( nums ):
    '''Return True if the given side lengths can form a closed polygon.'''
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 362 |
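# Usage sketch for check_polygon above: the longest side must be strictly
# shorter than the sum of the remaining sides for the figure to close.
if __name__ == "__main__":
    assert check_polygon([3, 4, 5]) is True    # a valid triangle
    assert check_polygon([1, 1, 3]) is False   # 3 >= 1 + 1, cannot close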
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['prompt']
__SCREAMING_SNAKE_CASE : str = ['prompt']
__SCREAMING_SNAKE_CASE : Dict = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
@property
def _a (self ):
return 32
@property
def _a (self ):
return 32
@property
def _a (self ):
return self.time_input_dim * 4
@property
def _a (self ):
return 8
@property
def _a (self ):
A_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase )
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
A_ : List[str] = PriorTransformer(**lowercase )
return model
@property
def _a (self ):
torch.manual_seed(0 )
A_ : str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
A_ : Dict = ShapERenderer(**lowercase )
return model
def _a (self ):
A_ : Optional[int] = self.dummy_prior
A_ : Optional[int] = self.dummy_text_encoder
A_ : int = self.dummy_tokenizer
A_ : Dict = self.dummy_renderer
A_ : Tuple = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , )
A_ : Union[str, Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a (self , lowercase , lowercase=0 ):
if str(lowercase ).startswith("""mps""" ):
A_ : Any = torch.manual_seed(lowercase )
else:
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : List[str] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _a (self ):
A_ : str = """cpu"""
A_ : Union[str, Any] = self.get_dummy_components()
A_ : Optional[int] = self.pipeline_class(**lowercase )
A_ : str = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Dict = pipe(**self.get_dummy_inputs(lowercase ) )
A_ : Dict = output.images[0]
A_ : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A_ : Tuple = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a (self ):
A_ : Tuple = torch_device == """cpu"""
A_ : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , )
def _a (self ):
A_ : List[Any] = self.get_dummy_components()
A_ : Any = self.pipeline_class(**lowercase )
A_ : Any = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = 1
A_ : Union[str, Any] = 2
A_ : Dict = self.get_dummy_inputs(lowercase )
for key in inputs.keys():
if key in self.batch_params:
A_ : Optional[Any] = batch_size * [inputs[key]]
A_ : List[Any] = pipe(**lowercase , num_images_per_prompt=lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
A_ : Tuple = ShapEPipeline.from_pretrained("""openai/shap-e""" )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = torch.Generator(device=lowercase ).manual_seed(0 )
A_ : List[str] = pipe(
"""a shark""" , generator=lowercase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 135 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig( PretrainedConfig ):
    model_type = """timesformer"""

    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 183 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve( limit : int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling : int = 1000000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183 | 1 |
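# Worked example for the search above with a small ceiling: below 100 the
# longest run of consecutive primes that sums to a prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41 (six terms), so solution(100) returns 41.
if __name__ == "__main__":
    assert solution(100) == 41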
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path : str , map_location : str = "cpu" , save_path : Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
| 357 |
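# Usage sketch for the converter above (file names are placeholders). fire
# exposes the function as a CLI, so the two forms below are equivalent:
#
#     python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
#     convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")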
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
    def test_canny(self ):
"""simple docstring"""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose(self ):
"""simple docstring"""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 238 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'big_bird'

    def __init__( self , vocab_size=50358 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4096 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig( OnnxConfig ):
    """simple docstring"""

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 68 |
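# A short usage sketch for the config above, assuming the transformers package
# is installed; block_size and num_random_blocks are the block-sparse knobs.
#
#     from transformers import BigBirdConfig, BigBirdModel
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     model = BigBirdModel(config)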
'''simple docstring'''
import numpy as np
def tangent_hyperbolic( vector ):
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 0 |
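# Quick check of the identity the function above relies on,
# tanh(x) = 2 * sigmoid(2x) - 1, against numpy's reference implementation.
if __name__ == "__main__":
    xs = np.linspace(-3, 3, 7)
    assert np.allclose(tangent_hyperbolic(xs), np.tanh(xs))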
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( _lowerCAmelCase ):
A = (PNDMScheduler,)
A = (('''num_inference_steps''', 50),)
def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCAmelCase_: Optional[int] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def __snake_case (self, SCREAMING_SNAKE_CASE_=0, **SCREAMING_SNAKE_CASE_ ) -> str:
UpperCAmelCase_: int = dict(self.forward_default_kwargs )
UpperCAmelCase_: Union[str, Any] = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = self.dummy_sample
UpperCAmelCase_: Any = 0.1 * sample
UpperCAmelCase_: List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: Optional[int] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
UpperCAmelCase_: Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
UpperCAmelCase_: List[str] = dummy_past_residuals[:]
UpperCAmelCase_: Optional[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: List[Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_: int = scheduler.step_plms(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: Union[str, Any] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case (self ) -> Union[str, Any]:
pass
def __snake_case (self, SCREAMING_SNAKE_CASE_=0, **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_: Dict = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self.dummy_sample
UpperCAmelCase_: List[str] = 0.1 * sample
UpperCAmelCase_: Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: Tuple = self.get_scheduler_config()
UpperCAmelCase_: str = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_: Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_: int = dummy_past_residuals[:]
UpperCAmelCase_: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: List[str] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_: Tuple = scheduler.step_plms(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: int = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_: int = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = 10
UpperCAmelCase_: Union[str, Any] = self.dummy_model()
UpperCAmelCase_: Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_: Optional[Any] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_: Tuple = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: int = self.get_scheduler_config()
UpperCAmelCase_: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = self.dummy_sample
UpperCAmelCase_: List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_, """set_timesteps""" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_, """set_timesteps""" ):
UpperCAmelCase_: Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_: List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase_: List[str] = dummy_past_residuals[:]
UpperCAmelCase_: Optional[int] = scheduler.step_prk(SCREAMING_SNAKE_CASE_, 0, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE_, 1, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
UpperCAmelCase_: Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE_, 0, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCAmelCase_: str = scheduler.step_plms(SCREAMING_SNAKE_CASE_, 1, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __snake_case (self ) -> str:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Dict:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_: Optional[int] = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps, torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), )
def __snake_case (self ) -> Dict:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1], [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_, beta_end=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[str]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_: Optional[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: Dict = self.dummy_sample
UpperCAmelCase_: Union[str, Any] = 0.1 * sample
UpperCAmelCase_: Optional[int] = self.get_scheduler_config()
UpperCAmelCase_: Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_: Optional[int] = scheduler.step_prk(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).prev_sample
def __snake_case (self ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_: int = self.get_scheduler_config()
UpperCAmelCase_: Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample ).prev_sample
def __snake_case (self ) -> str:
UpperCAmelCase_: List[str] = self.full_loop()
UpperCAmelCase_: Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Optional[int] = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase_: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def __snake_case (self ) -> Union[str, Any]:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_: Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_, beta_start=0.0_1 )
UpperCAmelCase_: Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def __snake_case (self ) -> Optional[Any]:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_: Optional[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_, beta_start=0.0_1 )
UpperCAmelCase_: Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
| 82 |
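# A minimal denoising-loop sketch for the scheduler exercised above (assumes
# torch and diffusers are installed; the zero "model output" stands in for a
# real noise-prediction network).
if __name__ == "__main__":
    import torch
    from diffusers import PNDMScheduler

    demo_scheduler = PNDMScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(50)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        model_output = torch.zeros_like(demo_sample)  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(model_output, t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])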
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total ):
        self.total_tasks = total  # total no. of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1

    def count_ways_until(self, mask, task_no ):
        # if mask == self.final_mask, all persons are distributed tasks; return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed ):
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 82 | 1 |
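# The same count as a sketch built on functools.lru_cache instead of a
# hand-rolled DP table; tasks_by_task_no mirrors self.task in the class above.
from functools import lru_cache

def count_assignments(task_performed, total_tasks):
    final_mask = (1 << len(task_performed)) - 1
    tasks_by_task_no = {}
    for person, tasks in enumerate(task_performed):
        for t in tasks:
            tasks_by_task_no.setdefault(t, []).append(person)

    @lru_cache(maxsize=None)
    def ways(mask, task_no):
        if mask == final_mask:
            return 1
        if task_no > total_tasks:
            return 0
        total = ways(mask, task_no + 1)  # ways that skip this task
        for p in tasks_by_task_no.get(task_no, []):
            if not mask & (1 << p):
                total += ways(mask | (1 << p), task_no + 1)
        return total

    return ways(0, 1)

# count_assignments([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10, matching the class above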
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'upernet'

    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 232 |
from __future__ import annotations
def solve_maze( maze : list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze , 0 , 0 , solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
return solved
def run_maze( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # explore all four directions
            if (
                run_maze(maze , i + 1 , j , solutions)
                or run_maze(maze , i , j + 1 , solutions)
                or run_maze(maze , i - 1 , j , solutions)
                or run_maze(maze , i , j - 1 , solutions)
            ):
                return True
            solutions[i][j] = 0  # backtrack
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__ : Optional[Any] = logging.get_logger(__name__)
class PerceiverFeatureExtractor( PerceiverImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 352 |
'''simple docstring'''
import math
def prime_sieve( n : int ) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit : int = 99_99_66_66_33_33 ) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_00
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_snake_case = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase :
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> Optional[Any]:
_A : str = d_model
_A : Any = parent
_A : List[str] = batch_size
_A : Any = prediction_length
_A : str = context_length
_A : Any = cardinality
_A : str = num_time_features
_A : str = lags_sequence
_A : List[Any] = embedding_dimension
_A : int = is_training
_A : Tuple = hidden_size
_A : Any = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Tuple = intermediate_size
_A : List[Any] = hidden_act
_A : Tuple = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Any = context_length
_A : str = prediction_length + label_length
_A : int = label_length
_A : List[str] = moving_average
_A : Dict = autocorrelation_factor
def a__ ( self ) -> List[str]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ ( self , _a ) -> Optional[int]:
_A : int = config.context_length + max(config.lags_sequence )
_A : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_A : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_A : Optional[Any] = floats_tensor([self.batch_size, _past_length] )
_A : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
_A : str = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def a__ ( self ) -> Tuple:
_A : List[Any] = self.get_config()
_A : int = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def a__ ( self ) -> Optional[int]:
_A , _A : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : Dict = AutoformerModel(config=_a ).to(_a ).eval()
_A : int = model(**_a )
_A : str = outputs.encoder_last_hidden_state
_A : Optional[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_A : str = model.get_encoder()
encoder.save_pretrained(_a )
_A : Optional[Any] = AutoformerEncoder.from_pretrained(_a ).to(_a )
_A , _A , _A , _A , _A : Optional[int] = model.create_network_inputs(**_a )
_A , _A : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_A : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_A : str = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_A : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_A : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_A : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_A : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = model.get_decoder()
decoder.save_pretrained(_a )
_A : Tuple = AutoformerDecoder.from_pretrained(_a ).to(_a )
_A : List[Any] = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_a = (AutoformerForPrediction,) if is_torch_available() else ()
_a = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : Optional[int] = AutoformerModelTester(self )
_A : Union[str, Any] = ConfigTester(self , config_class=_a , has_text_modality=_a )
def a__ ( self ) -> int:
self.config_tester.run_common_tests()
def a__ ( self ) -> Optional[int]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
_A , _A : Any = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info["""missing_keys"""] , [] )
def a__ ( self ) -> str:
_A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> str:
_A : Union[str, Any] = inspect.signature(getattr(_a , """forward""" ) )
# The main input is the name of the argument after `self`
_A : Union[str, Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def a__ ( self ) -> List[Any]:
_A , _A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : str = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[str] = [*signature.parameters.keys()]
_A : Tuple = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = True
_A : str = getattr(self.model_tester , """seq_length""" , _a )
_A : Dict = getattr(self.model_tester , """decoder_seq_length""" , _a )
_A : str = getattr(self.model_tester , """encoder_seq_length""" , _a )
_A : List[Any] = getattr(self.model_tester , """d_model""" , _a )
_A : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , _a )
_A : List[str] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_A : Optional[Any] = True
_A : List[str] = False
_A : Optional[int] = True
_A : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[Any] = model(**self._prepare_for_class(_a , _a ) )
_A : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A : Dict = True
_A : List[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : int = model(**self._prepare_for_class(_a , _a ) )
_A : Tuple = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_A : List[str] = len(_a )
_A : int = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
_A : Dict = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_A : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_A : Dict = True
_A : Any = True
_A : str = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[str] = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
_A : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase_ ( snake_case_="train-batch.pt" ):
_A : Optional[int] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""",filename=snake_case_,repo_type="""dataset""" )
_A : List[str] = torch.load(snake_case_,map_location=snake_case_ )
return batch
@require_torch
@slow
class lowercase ( unittest.TestCase ):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 26 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
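# Illustrative example (not from any real config file): a nested YAML such as
#     model:
#       classification:
#         name: mobilevit_v2
# is flattened into the namespace attribute "model.classification.name" set to
# "mobilevit_v2", which is why the getattr() calls below look up dotted strings.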
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d are not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
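# Worked example (illustrative): an original checkpoint key such as
#     "layer_3.1.local_rep.0.conv.weight"
# first has ".conv." rewritten to ".convolution.", then the "layer_3.1.local_rep.0."
# prefix rewritten, yielding
#     "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"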
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g., the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, orig_checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(orig_checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 26 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 369 |
'''simple docstring'''
from __future__ import annotations
from random import random
class A :
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two treaps: keys <= value and keys > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
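# Example (illustrative): splitting a treap holding the keys {1, 3, 5} at value=4
# returns a left treap with {1, 3} (all keys <= 4) and a right treap with {5}.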
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes holding `value` from the treap."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
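# Example (illustrative): with a treap holding {1, 3, 5}, insert(root, 4) splits it
# into ({1, 3}, {5}), merges {1, 3} with the new node 4, then merges the result with
# {5}; the random heap priorities keep the tree balanced in expectation.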
def inorder(root: Node | None) -> None:
    """Print the treap's keys in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a sequence of commands: "+x" inserts x, "-x" erases all nodes with x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read commands until 'q' is entered."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 311 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
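# Shape sketch (illustrative): timm stores q, k and v as one fused projection of
# shape [3 * hidden_size, hidden_size]; rows [0:h] are the query, [h:2h] the key and
# [2h:3h] the value, which is exactly how the slices above carve the matrix up.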
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 165 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
'''simple docstring'''
with open(snake_case__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ = json.load(snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ = torch.load(snake_case__ , map_location="""cpu""" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ = load_entity_vocab(snake_case__ )
SCREAMING_SNAKE_CASE__ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent>""" , lstrip=snake_case__ , rstrip=snake_case__ )
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent2>""" , lstrip=snake_case__ , rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ = f"""encoder.layer.{layer_index}.attention.self."""
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = entity_emb[entity_vocab["""[MASK]"""]]
SCREAMING_SNAKE_CASE__ = LukeModel(config=snake_case__ ).eval()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(snake_case__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ , task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
SCREAMING_SNAKE_CASE__ = (39, 42)
SCREAMING_SNAKE_CASE__ = tokenizer(snake_case__ , entity_spans=[span] , add_prefix_space=snake_case__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
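# Expected file layout (an assumption based on the parsing above): one entity per
# line as "<title>\t<second column>", e.g. "United States\t12345"; only the title
# is kept, and the line index becomes the entity id.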
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 165 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_00_00
SMALL_TEST = 50_00

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, type, length):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
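# Summary (illustrative): the four helpers above time sequential row access, batched
# slicing, and the same two patterns after Dataset.formatted_as() converts rows to a
# given format (numpy/pandas/torch/tensorflow); the benchmark below runs them both
# before and after shuffling the dataset.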
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''')
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'''list''': (100,)}, )
        print('''first set of iterations''')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('''shuffling dataset''')
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''')
        for func, kwargs in functions_shuffled:
            print('''shuffled ''', func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 371 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
_snake_case : str = field(metadata={'help': 'Should contain the data files for the task.'} )
_snake_case : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 287 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
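# Minimal smoke test for the helpers above (uses the de-masked names defined
# in this file; the printed shape follows from shape=(1, 20) and a 64x64 view):
#
#   cameras = create_pan_cameras(size=64)
#   rays = cameras.camera_rays
#   print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3]): one (origin, direction) pair per pixel per view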
| 15 |
def solution() -> int:
    # Project Euler 9: product a * b * c for the Pythagorean triplet with a + b + c = 1000.
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
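# Sanity check: the unique triplet is (200, 375, 425), so the printed value is 31875000.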
| 124 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """
    Returns the shortest distance from `source` to `destination` on a binary
    grid (cells equal to 1 are walkable), together with the path taken.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
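# Example run (uses the de-masked dijkstra above):
#
#   grid = np.array([[1, 1, 1],
#                    [0, 1, 0],
#                    [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0 and path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]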
| 72 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """
    Divides each row by its leading coefficient, subtracts the first row from
    the others to eliminate the first column, and recurses on the remainder
    (one Gaussian elimination step per call).
    """
    # Divide each row by the magnitude of its first term, creating unit rows
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list) -> list:
    """
    Solves a system of n linear equations given as n lists of length n + 1
    (coefficients followed by the constant term), returning the solutions in
    variable order.
    """
    if len(equations) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('''solve_simultaneous() requires lists of integers''')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
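# Expected output: the 5x5 system above solves to [-1.0, 0.0, 1.0, 2.0, 3.0]
# (row i reads sum(x) + x_i = 3 + i, so sum(x) = 5), and [[4, 2]] reduces to
# 4x = 2 -> [0.5].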
| 72 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
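# Each guarded block above registers its symbols in _import_structure only when
# the backend (tokenizers, torch, TF, flax) is importable, so importing this
# module stays cheap until an attribute is actually accessed through the lazy
# module installed below.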
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 163 |
import math
def prime_sieve(n: int) -> list:
    """Odd-stepping sieve of Eratosthenes returning all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Project Euler 234 ("Semidivisible numbers"): sums all semidivisible
    numbers not exceeding `limit`, where lps(n) / ups(n) are the largest /
    smallest primes below / above sqrt(n).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
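# Example for the helper above:
#   prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# solution() then walks consecutive prime pairs (p, q) and sums the numbers in
# (p**2, q**2] divisible by exactly one of p and q.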
| 0 | 0 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """
    Hash table whose slots hold deques, so colliding keys chain at a slot
    instead of overwriting each other.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
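# Usage sketch (assumes a TheAlgorithms-style HashTable parent exposing
# insert_data and a `values` list; that parent API is an assumption, it is not
# defined in this file):
#   ht = HashTableWithLinkedList(size_table=3, charge_factor=2)
#   for value in (17, 18, 99):
#       ht.insert_data(value)
#   # colliding keys accumulate in a deque at their slot instead of probing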
| 362 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based XLNet tokenizer; pads on the left by default."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<sep>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<cls>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=["<eop>", "<eod>"] , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
lowerCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
lowerCamelCase =3
lowerCamelCase =do_lower_case
lowerCamelCase =remove_space
lowerCamelCase =keep_accents
lowerCamelCase =vocab_file
lowerCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def _snake_case ( self ):
return len(self.sp_model )
def _snake_case ( self ):
lowerCamelCase ={self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase =self.__dict__.copy()
lowerCamelCase =None
return state
def __setstate__( self , UpperCAmelCase_ ):
lowerCamelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase ={}
lowerCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , UpperCAmelCase_ ):
if self.remove_space:
lowerCamelCase =""" """.join(inputs.strip().split() )
else:
lowerCamelCase =inputs
lowerCamelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase =unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
lowerCamelCase ="""""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
lowerCamelCase =outputs.lower()
return outputs
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =self.preprocess_text(UpperCAmelCase_ )
lowerCamelCase =self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
lowerCamelCase =[]
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase =cur_pieces[1:]
else:
lowerCamelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def _snake_case ( self , UpperCAmelCase_ ):
return self.sp_model.PieceToId(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase ="""""".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , """ """ ).strip()
return out_string
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , **UpperCAmelCase_ , ):
lowerCamelCase =kwargs.pop("""use_source_tokenizer""" , UpperCAmelCase_ )
lowerCamelCase =self.convert_ids_to_tokens(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase =[]
lowerCamelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase_ ) )
lowerCamelCase =[]
sub_texts.append(UpperCAmelCase_ )
else:
current_sub_text.append(UpperCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase ="""""".join(UpperCAmelCase_ )
lowerCamelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase =self.clean_up_tokenization(UpperCAmelCase_ )
return clean_text
else:
return text
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCamelCase =[self.sep_token_id]
lowerCamelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1]
return ([0] * len(UpperCAmelCase_ )) + [1, 1]
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCamelCase =[self.sep_token_id]
lowerCamelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase =os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
lowerCamelCase =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
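# Round-trip sketch for the tokenizer above (requires the real SentencePiece
# model file; the checkpoint name is the standard published one, not defined
# in this file):
#   tok = XLNetTokenizer.from_pretrained('xlnet-base-cased')
#   ids = tok('Hello world')['input_ids']  # sequence ends with <sep>, <cls>
#   print(tok.decode(ids))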
| 262 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = '''</s>'''
A : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_UpperCamelCase ) , 1103 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
A : Any = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
A : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0]
A : Any = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A : List[Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
A : Optional[int] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
A : List[str] = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase ).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : List[str] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
A : Union[str, Any] = '''To ensure a smooth flow of bank resolutions.'''
A : Dict = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
A : Dict = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase ).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = ['''This is going to be way too long.''' * 150, '''short example''']
A : Union[str, Any] = ['''not super long but more than 5 tokens''', '''tiny''']
A : str = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''pt''' )
A : Optional[Any] = self._large_tokenizer(
text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCamelCase ) == 2 # input_ids, attention_mask.
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : int = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
A : Union[str, Any] = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
A : str = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0]
A : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_torch
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = ['''This is going to be way too long.''' * 1000, '''short example''']
A : str = ['''not super long but more than 5 tokens''', '''tiny''']
A : List[Any] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''pt''' )
A : Optional[Any] = self._large_tokenizer(
text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCamelCase ) == 2 # input_ids, attention_mask.
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
A : str = self._large_tokenizer(_UpperCamelCase ).input_ids
self.assertListEqual(
_UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
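# Note: Pegasus reserves <mask_1> for whole-sentence (gap-sentence) masking and
# <mask_2> for single-token MLM masking, which is why the integration strings
# above exercise both tokens plus the 103-id offset for ordinary pieces.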
| 3 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
def __init__(self :int , _UpperCamelCase :Optional[int] , _UpperCamelCase :Dict=13 , _UpperCamelCase :Optional[Any]=7 , _UpperCamelCase :str=True , _UpperCamelCase :Tuple=False , _UpperCamelCase :int=99 , _UpperCamelCase :int=16 , _UpperCamelCase :int=2 , _UpperCamelCase :int=4 , _UpperCamelCase :str=4 , _UpperCamelCase :Dict="gelu" , _UpperCamelCase :int=0.1 , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :Union[str, Any]=32 , _UpperCamelCase :Any=2 , _UpperCamelCase :Union[str, Any]=1 , _UpperCamelCase :Tuple=0 , _UpperCamelCase :List[str]=0.0_2 , )-> str:
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
__A = initializer_range
def _lowerCAmelCase (self :Optional[int] )-> Union[str, Any]:
__A = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__A = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__A = shift_tokens_right(_UpperCamelCase , 1 , 2 )
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
__A = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def _lowerCAmelCase (self :Union[str, Any] )-> Tuple:
__A , __A = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase (self :Dict , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[int] )-> str:
__A = 20
__A = model_class_name(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
__A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__A = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
__A = model.decode(_UpperCamelCase , _UpperCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :int , _UpperCamelCase :List[str] , _UpperCamelCase :Any )-> Dict:
__A = 20
__A = model_class_name(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__A = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__A = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxBlenderbotHeadTests ( unittest.TestCase ):
    vocab_size = 99
def _lowerCAmelCase (self :Dict )-> int:
__A = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__A = input_ids.shape[0]
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCAmelCase (self :Any )-> List[Any]:
__A , __A , __A = self._get_config_and_data()
__A = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
__A = lm_model(input_ids=_UpperCamelCase )
__A = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def _lowerCAmelCase (self :int )-> Dict:
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__A = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
__A = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__A = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__A = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
__A = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def _lowerCAmelCase (self :Tuple )-> Tuple:
__A = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__A = shift_tokens_right(_UpperCamelCase , 1 , 2 )
__A = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
__A = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _lowerCAmelCase (self :List[str] )-> Optional[int]:
__A = FlaxBlenderbotModelTester(self )
def _lowerCAmelCase (self :List[str] )-> List[str]:
__A , __A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :Dict )-> List[str]:
__A , __A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :Union[str, Any] )-> Union[str, Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
__A = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase :int , _UpperCamelCase :int=None , **_UpperCamelCase :Dict ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
__A = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCAmelCase (self :List[str] )-> List[Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = model_class(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__A = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase :str , _UpperCamelCase :Tuple , _UpperCamelCase :Dict ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
__A = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCAmelCase (self :int )-> Optional[int]:
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__A = np.ones((1, 1) ) * model.config.eos_token_id
__A = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def _lowerCAmelCase (self :Dict )-> List[str]:
__A = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
__A = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__A = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_UpperCamelCase )
__A = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__A = ['''Sam''']
__A = tokenizer(_UpperCamelCase , return_tensors='''jax''' )
__A = model.generate(**_UpperCamelCase , **_UpperCamelCase )
__A = '''Sam is a great name. It means "sun" in Gaelic.'''
__A = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
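# shift_tokens_right (exercised above) moves each label row one position to the
# right and writes the decoder start token (id 2 in that call) into position 0;
# the asserts confirm the shape is preserved, one pad token falls off the end
# (n_pad_after == n_pad_before - 1), and every row begins with the start token.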
| 117 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
class a ( BaseImageProcessor ):
    """Image processor applying resize / center-crop / rescale / normalize with ImageNet-default statistics."""

    model_input_names = ["pixel_values"]
def __init__( self: str , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: List[str] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = size if size is not None else {"""height""": 2_24, """width""": 2_24}
A__ = get_size_dict(UpperCamelCase )
A__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" )
A__ = do_resize
A__ = do_rescale
A__ = do_normalize
A__ = do_center_crop
A__ = crop_size
A__ = size
A__ = resample
A__ = rescale_factor
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ):
"""simple docstring"""
A__ = get_size_dict(UpperCamelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
A__ = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[str] , ):
"""simple docstring"""
A__ = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: int ):
"""simple docstring"""
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[str] , ):
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: int = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: str , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase )
A__ = resample if resample is not None else self.resample
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCamelCase )
if not is_batched(UpperCamelCase ):
A__ = [images]
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
A__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
A__ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
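# Pipeline order implemented above: resize -> center_crop -> rescale ->
# normalize, each applied per image on numpy arrays, then packed into a
# BatchFeature under 'pixel_values'. With the 224x224 defaults, a single RGB
# input comes out with shape (1, 3, 224, 224) in channels-first order.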
| 69 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, """rb""" ) as fp:
            corpus = pickle.load(fp, encoding="""latin1""" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""", None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model, config, tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict(), pytorch_weights_dump_path )
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path, """w""", encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
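# Example CLI call using the flags registered above (the script filename and
# paths are illustrative):
#   python convert_transfo_xl_checkpoint.py --pytorch_dump_folder_path ./out \
#       --tf_checkpoint_path ./transfo_xl.ckpt --transfo_xl_config_file ./config.json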
| 69 | 1 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="""utf-8""" ) as f:
        results = json.load(f )

    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]

    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}" )

        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""", None )
            dif_val = metric_vals.get("""diff""", None )

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float) ) else """None"""
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float) ) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float) ) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("""</details>""" )

    with open(output_md_file, """w""", encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
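# Example: a results file containing
#   {"benchmarks/inference.py": {"latency_ms": {"new": 12.5, "old": 13.0, "diff": -0.5}}}
# renders as a collapsible <details> block with the table
#   | metric | latency_ms |
#   |--------|---|
#   | new / old (diff) |  12.500000 / 13.000000 (-0.500000) |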
| 100 |
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """
    Project Euler 94: sums the perimeters of all almost-equilateral Heronian
    triangles (integer sides a, a, a +/- 1 with integral area) whose perimeter
    does not exceed max_perimeter.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
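# Sanity check (computed by hand from the recurrence above): the first two qualifying
# perimeters are 16 (triangle 5, 5, 6) and 50 (triangle 17, 17, 16), so solution(100) == 66.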
| 100 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own flags plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
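# Illustrative launch (added note; the launcher file name and the training-script flags are
# placeholders). Everything after the training script path is forwarded verbatim, plus an
# extra `--tpu_num_cores` flag injected into sys.argv before `_mp_fn` is spawned:
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased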
| 19 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Expected number of distinct colours among num_picked balls drawn at random
    from an urn holding BALLS_PER_COLOUR balls of each of NUM_COLOURS colours.
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
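# The closed form implemented above is linearity of expectation: each of the 7 colours is
# missing from a 20-ball draw with probability C(60, 20) / C(70, 20), so
# E[#colours] = 7 * (1 - C(60, 20) / C(70, 20)).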
| 19 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n clockwise spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
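# Example from the problem statement: a 5 x 5 spiral has diagonal sum
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101, i.e. solution(5) == 101.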
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='''crop_size''' )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ =do_convert_rgb
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''crop_size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
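# Hedged usage sketch for the processor class above (its name is mangled in this dump; the
# transformers class with these exact defaults is, to the best of my knowledge,
# CLIPImageProcessor, so that name below is an assumption):
#
#   import numpy as np
#   from PIL import Image
#   proc = CLIPImageProcessor()                 # assumed original class name
#   img = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = proc(images=img, return_tensors="np")
#   print(batch["pixel_values"].shape)          # (1, 3, 224, 224) with the defaults above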
| 75 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase , _UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase = controlnet_params
_UpperCamelCase = '''bird'''
_UpperCamelCase = jax.device_count()
_UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
_UpperCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = jax.random.split(lowerCAmelCase__ , jax.device_count() )
_UpperCamelCase = replicate(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=50 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase = images[0, 253:256, 253:256, -1]
_UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase , _UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
_UpperCamelCase = controlnet_params
_UpperCamelCase = '''Chef in the kitchen'''
_UpperCamelCase = jax.device_count()
_UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
_UpperCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = jax.random.split(lowerCAmelCase__ , jax.device_count() )
_UpperCamelCase = replicate(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=50 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase = images[0, 253:256, 253:256, -1]
_UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
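# Note on the multi-device pattern used in both tests above: `replicate(params)` copies the
# weights onto every local device, while `shard(...)` reshapes the leading batch axis of the
# inputs to (jax.device_count(), per_device_batch, ...), which is the layout that
# `pipe(..., jit=True)` expects for its internal pmap.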
| 287 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest square of 1s via plain recursion (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a DP array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a full (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next row (O(cols) space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy the finished row; aliasing would corrupt `diagonal`
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
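# Quick check of the variants above on a matrix whose largest all-ones square is 2 x 2:
#
#   mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
#   assert largest_square_area_in_matrix_bottom_up(3, 3, mat) == 2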
| 287 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case__ :
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ) -> Dict:
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ) -> Union[str, Any]:
a = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
a = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = object_detector(examples[0] , threshold=0.0 )
a = len(__lowerCamelCase )
self.assertGreater(__lowerCamelCase , 0 )
self.assertEqual(
__lowerCamelCase , [
{
"score": ANY(__lowerCamelCase ),
"label": ANY(__lowerCamelCase ),
"box": {"xmin": ANY(__lowerCamelCase ), "ymin": ANY(__lowerCamelCase ), "xmax": ANY(__lowerCamelCase ), "ymax": ANY(__lowerCamelCase )},
}
for i in range(__lowerCamelCase )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
a = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
a = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
] , )
a = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
]
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = pipeline("zero-shot-object-detection" )
a = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
] , )
a = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
pass
@require_torch
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = 0.2
a = pipeline("zero-shot-object-detection" )
a = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=__lowerCamelCase , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : str ) -> Any:
a = 2
a = pipeline("zero-shot-object-detection" )
a = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=__lowerCamelCase , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
] , )
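# The pipeline exercised by the tests above is the public entry point; a short usage sketch
# mirroring the calls made in the tests (the model download happens on first use):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#       threshold=0.3,
#       top_k=2,
#   )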
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
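# The attribute_map above lets generic code read canonical names through the GPT-style ones:
#
#   config = OpenAIGPTConfig(n_layer=6, n_head=8)
#   assert config.num_hidden_layers == 6 and config.num_attention_heads == 8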
| 107 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """XGLM tokenizer, backed by SentencePiece, with fairseq-compatible special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , __a : Optional[int] , __a : Tuple="<s>" , __a : Any="</s>" , __a : Tuple="</s>" , __a : List[Any]="<s>" , __a : List[Any]="<unk>" , __a : Dict="<pad>" , __a : Optional[Dict[str, Any]] = None , **__a : Any , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_a = 7
_a = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
_a = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
_a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_a = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_a = len(self.sp_model )
_a = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__a )
_a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
_a = self.__dict__.copy()
_a = None
_a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , __a : Optional[Any] ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase__ ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_a = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase__ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a ))
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a ))
def UpperCamelCase__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ):
_a = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCamelCase__ ( self : Tuple ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase__ ( self : List[Any] ):
_a = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self : Union[str, Any] , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : Any , __a : Tuple ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self : Any , __a : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
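# Note the fairseq-style special-token layout implemented above: a single sequence becomes
# `</s> A` (the sep token is prepended, nothing is appended) and a pair becomes
# `</s> A </s> </s> B`, unlike the BERT-style `[CLS] ... [SEP]` layout.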
| 346 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
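# Classic CLRS check: with prices [1, 5, 8, 9] a rod of length 4 is best cut into two
# pieces of length 2, so bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10.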
| 346 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
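# The shim above keeps old `FlavaFeatureExtractor` imports working: construction emits a
# FutureWarning and then defers entirely to FlavaImageProcessor.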
| 334 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' )
return sd
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =OrderedDict()
SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE =key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0], name_pair[1] )
SCREAMING_SNAKE_CASE =d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE =new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE ='pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
SCREAMING_SNAKE_CASE ='multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
SCREAMING_SNAKE_CASE ='vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048, 'num_labels': 3129}
SCREAMING_SNAKE_CASE ='vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE ='nlvr'
SCREAMING_SNAKE_CASE =VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE =load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =get_new_dict(lowerCAmelCase_, lowerCAmelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE =VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
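# Illustrative invocation (the script file name is an assumption; the checkpoint name comes
# from ACCEPTABLE_CHECKPOINTS above, and both arguments are positional per the parser):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       nlvr2_fine_tuned.th ./visual_bert_nlvr2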
| 334 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Path , __snake_case : Union[str, None] = None , __snake_case : Union[List[str], None] = None , __snake_case : Union[str, List[str], None] = None , __snake_case : bool = True , )-> Optional[Any]:
snake_case = [file for file in os.listdir(__snake_case ) if os.path.isfile(os.path.join(__snake_case , __snake_case ) )]
if identifier is not None:
snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__snake_case , __snake_case ):
for n_ in n_identifier:
snake_case = [file for file in files if n_ not in file]
else:
snake_case = [file for file in files if n_identifier not in file]
snake_case = ignore_files or []
ignore_files.append("""__init__.py""" )
snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __snake_case )
if only_modules:
snake_case = file.split(""".""" )[0]
try:
snake_case = getattr(__snake_case , __snake_case )
snake_case = doctest.DocTestSuite(__snake_case )
snake_case = unittest.TextTestRunner().run(__snake_case )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
snake_case = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = Path("""src/transformers""" )
snake_case = """modeling"""
snake_case = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__snake_case , identifier=__snake_case , ignore_files=__snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
snake_case = Path("""src/transformers""" )
snake_case = """tokenization"""
self.analyze_directory(__snake_case , identifier=__snake_case )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = Path("""src/transformers""" )
snake_case = """configuration"""
self.analyze_directory(__snake_case , identifier=__snake_case )
def lowerCAmelCase ( self : List[Any] )-> Tuple:
snake_case = Path("""src/transformers""" )
snake_case = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__snake_case , n_identifier=__snake_case )
def lowerCAmelCase ( self : Dict )-> str:
snake_case = Path("""docs/source""" )
snake_case = ["""favicon.ico"""]
self.analyze_directory(__snake_case , ignore_files=__snake_case , only_modules=__snake_case )
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num factorial."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split number into its decimal digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num factorial."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
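# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.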
| 165 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
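# Worked example: the first Fibonacci term with three digits is F(12) = 144, so
# solution(3) == 12.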
| 165 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
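# The block above is transformers' standard lazy-import boilerplate: at runtime the module
# object is swapped for a _LazyModule, so MLukeTokenizer (and its sentencepiece dependency)
# is only imported when the attribute is first accessed.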
| 286 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
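# From the problem statement, the best run of four adjacent digits in N is
# 9 * 9 * 8 * 9 = 5832; the function above applies the same scan with a window of thirteen.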
| 286 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan of array[left:right] for target; -1 if absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; -1 if target is absent."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left:right]; -1 if target is absent."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 342 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = '''bert-base-cased'''
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : int = TFAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = '''bert-base-cased'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : List[Any] = TFAutoModelForPreTraining.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : Dict = TFAutoModelForCausalLM.from_pretrained(a_ )
__snake_case , __snake_case : int = TFAutoModelForCausalLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(a_ )
__snake_case , __snake_case : int = TFAutoModelForMaskedLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a_ )
__snake_case , __snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case : Any = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : Dict = TFAutoModelForSequenceClassification.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__snake_case : Optional[int] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
@require_tensorflow_probability
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
__snake_case : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(a_ )
__snake_case , __snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = TFAutoModelWithLMHead.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a_ ) , 1_44_10 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a_ ) , 1_44_10 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(a_ , a_ )
__snake_case : Optional[Any] = copy.deepcopy(model.config )
__snake_case : int = ['''FunnelBaseModel''']
__snake_case : Any = TFAutoModel.from_config(a_ )
self.assertIsInstance(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
__snake_case : Dict = TFAutoModel.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
try:
AutoConfig.register('''new-model''' , a_ )
__snake_case : List[str] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_ , a_ )
auto_class.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case : Union[str, Any] = BertModelTester(self ).get_config()
__snake_case : str = NewModelConfig(**tiny_config.to_dict() )
__snake_case : Optional[int] = auto_class.from_config(a_ )
self.assertIsInstance(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
__snake_case : Optional[int] = auto_class.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with self.assertRaisesRegex(
a_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__snake_case : Any = TFAutoModel.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with self.assertRaisesRegex(
a_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__snake_case : Dict = TFAutoModel.from_pretrained(a_ , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with self.assertRaisesRegex(
a_ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__snake_case : Any = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with self.assertRaisesRegex(a_ , '''Use `from_pt=True` to load this model''' ):
__snake_case : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__snake_case : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__snake_case : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__snake_case : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
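# A minimal sketch of the registration pattern exercised above, with
# hypothetical names (MyConfig / MyTFModel are illustrative, not part of
# this test file):
#
# from transformers import AutoConfig, PretrainedConfig, TFAutoModel, TFPreTrainedModel
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyTFModel(TFPreTrainedModel):
#     config_class = MyConfig
#
# AutoConfig.register("my-model", MyConfig)
# TFAutoModel.register(MyConfig, MyTFModel)
# # TFAutoModel.from_config(MyConfig()) now resolves to MyTFModel.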
| 102 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.dummy_uncond_unet
__snake_case : Any = ScoreSdeVeScheduler()
__snake_case : int = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : str = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A ).images
__snake_case : Any = torch.manual_seed(0 )
__snake_case : Tuple = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A , return_dict=_A )[
0
]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = '''google/ncsnpp-church-256'''
__snake_case : Union[str, Any] = UNetaDModel.from_pretrained(_A )
__snake_case : List[Any] = ScoreSdeVeScheduler.from_pretrained(_A )
__snake_case : Any = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__snake_case : Union[str, Any] = torch.manual_seed(0 )
__snake_case : Optional[int] = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_A ).images
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__snake_case : Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 363 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
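# Typical call pattern for a combined processor like the one above; the
# class and checkpoint names here are hypothetical:
#
# processor = SomeProcessor.from_pretrained("org/clip-style-checkpoint")
# batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# # batch carries input_ids / attention_mask from the tokenizer plus
# # pixel_values from the image processor, mirroring __call__ above.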
| 24 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Dict = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
from __future__ import annotations
import numpy as np
def relu ( vector ):
    """simple docstring"""
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
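# Companion sketch (relies on the numpy import above): the subgradient used
# in backpropagation is 1 for positive inputs and 0 elsewhere (the value at
# exactly 0 is a convention).
def relu_derivative(vector):
    return (np.asarray(vector) > 0).astype(float)

# relu_derivative([-1, 0, 5]) --> [0., 0., 1.]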
| 303 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : List[str] = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'vit_mae'
def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = decoder_num_attention_heads
a = decoder_hidden_size
a = decoder_num_hidden_layers
a = decoder_intermediate_size
a = mask_ratio
a = norm_pix_loss
| 330 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase : Optional[Any] = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , ) -> List[Any]:
"""simple docstring"""
A : List[Any] = [file for file in os.listdir(SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
A : Optional[int] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
A : Union[str, Any] = [file for file in files if n_ not in file]
else:
A : Any = [file for file in files if n_identifier not in file]
A : Union[str, Any] = ignore_files or []
ignore_files.append('''__init__.py''' )
A : int = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , SCREAMING_SNAKE_CASE )
if only_modules:
A : List[Any] = file.split('''.''' )[0]
try:
A : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Tuple = doctest.DocTestSuite(SCREAMING_SNAKE_CASE )
A : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
A : Optional[int] = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = Path('''src/transformers''' )
A : Any = '''modeling'''
A : Tuple = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE , ignore_files=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Dict = Path('''src/transformers''' )
A : Optional[Any] = '''tokenization'''
self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Any = Path('''src/transformers''' )
A : Optional[int] = '''configuration'''
self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Tuple = Path('''src/transformers''' )
A : Dict = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(SCREAMING_SNAKE_CASE , n_identifier=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : List[Any] = Path('''docs/source''' )
A : List[str] = ['''favicon.ico''']
self.analyze_directory(SCREAMING_SNAKE_CASE , ignore_files=SCREAMING_SNAKE_CASE , only_modules=SCREAMING_SNAKE_CASE )
| 3 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if return_pvalue:
A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
| 3 | 1 |
"""simple docstring"""
import numpy as np
def snake_case (vector :np.ndarray , alpha :float ):
    '''simple docstring'''
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
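# A small numeric check of the ELU above: with alpha = 1.0, negative inputs
# saturate toward -alpha while positive inputs pass through unchanged.
# snake_case(np.array([-2.0, 0.0, 3.0]), 1.0) --> [-0.86466472  0.  3.]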
| 368 |
"""simple docstring"""
def find_minimum_change (denominations :list[int] , value :str ):
    '''simple docstring'''
    total_value : int = int(value )
    # Initialize Result
    answer : list[int] = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Find denominations
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
_UpperCamelCase : Dict = []
_UpperCamelCase : str = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
_UpperCamelCase : Dict = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
_UpperCamelCase : Optional[int] = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
_UpperCamelCase : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
_UpperCamelCase : Dict = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f'''Following is minimal change for {value}: ''')
_UpperCamelCase : Tuple = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
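# Note: this greedy strategy is optimal for canonical coin systems such as
# the Indian denominations above, but not for arbitrary ones. With assumed
# denominations [1, 3, 4] and a target of 6 (not part of the driver),
# find_minimum_change([1, 3, 4], "6") returns [4, 1, 1] even though [3, 3]
# uses fewer coins.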
| 186 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = logging.get_logger()
# the current default level is logging.WARNING
SCREAMING_SNAKE_CASE = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCAmelCase__ )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = logging.get_verbosity()
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCAmelCase__ ) as cl:
logger.warning(lowerCAmelCase__ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCAmelCase__ ) as cl:
logger.warning(lowerCAmelCase__ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCAmelCase__ ) as cl:
logger.warning(lowerCAmelCase__ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(lowerCAmelCase__ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __A ( self ) -> str:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE = os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE = logging.get_verbosity()
self.assertEqual(
lowerCAmelCase__ , lowerCAmelCase__ , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
SCREAMING_SNAKE_CASE = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __A ( self ) -> int:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE = logging.logging.getLogger()
with CaptureLogger(lowerCAmelCase__ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __A ( self ) -> List[Any]:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCAmelCase__ ) as cl:
logger.warning_advice(lowerCAmelCase__ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCAmelCase__ ) as cl:
logger.warning_advice(lowerCAmelCase__ )
self.assertEqual(cl.out , msg + '\n' )
def lowercase () -> Optional[Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
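# The public surface exercised above, in one place (a sketch, not part of
# the test suite):
#
# from transformers.utils import logging as hf_logging
# hf_logging.set_verbosity_info()                  # or set_verbosity_error(), ...
# hf_logging.get_logger(__name__).info("visible")  # respects the global level
# hf_logging.disable_progress_bar()                # toggles tqdm bars globally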
| 113 |
"""simple docstring"""
def solution (power : int = 10_00 ) -> int:
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
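# Equivalent one-liner for cross-checking the digit loop above:
# sum(int(digit) for digit in str(2 ** power)) == solution(power)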
| 113 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase__ : str = TypeVar("T")
class a__ ( Generic[T] ):
def __init__( self , A = True ) -> None:
'''simple docstring'''
a = {} # dictionary of lists
a = directed
def lowerCAmelCase_ ( self , A , A ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
self.adj_list[destination_vertex].append(A )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
a = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(A )
a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
a = [destination_vertex]
a = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
a = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
a = [destination_vertex]
a = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
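# Usage sketch for the adjacency-list graph above. In the original algorithm
# each bare assignment such as `a = [destination_vertex]` writes into
# self.adj_list (e.g. self.adj_list[source_vertex] = [destination_vertex]);
# with that reading:
#
# g = a__(directed=False)
# g.lowerCAmelCase_("a", "b").lowerCAmelCase_("b", "c")
# print(g) # {'a': ['b'], 'b': ['a', 'c'], 'c': ['b']}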
| 365 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class a__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : Any = """resnet"""
a : Tuple = ["""basic""", """bottleneck"""]
def __init__( self , A=3 , A=64 , A=[256, 512, 1024, 2048] , A=[3, 4, 6, 3] , A="bottleneck" , A="relu" , A=False , A=None , A=None , **A , ) -> Any:
'''simple docstring'''
super().__init__(**A )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
a = num_channels
a = embedding_size
a = hidden_sizes
a = depths
a = layer_type
a = hidden_act
a = downsample_in_first_stage
a = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(A ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
class a__ ( UpperCamelCase__ ):
a : Optional[int] = version.parse("""1.11""" )
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 180 | 0 |
"""simple docstring"""
def snake_case_ ( number : int, iterations : int ):
    '''simple docstring'''
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out : str = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
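# Example run (the trailing space after each token comes from `out += " "`):
# snake_case_(1, 15)
# --> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '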
| 72 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''file.csv'''
lowerCamelCase__ : Tuple = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Any = tmp_path / '''malformed_file.csv'''
lowerCamelCase__ : List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Dict = tmp_path / '''csv_with_image.csv'''
lowerCamelCase__ : int = textwrap.dedent(
f"\\n image\n {image_file}\n " )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = tmp_path / '''csv_with_label.csv'''
lowerCamelCase__ : List[Any] = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''csv_with_int_list.csv'''
lowerCamelCase__ : Dict = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = Csv()
lowerCamelCase__ : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(UpperCAmelCase , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(UpperCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Tuple = f.read().splitlines()[1]
lowerCamelCase__ : Any = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowerCamelCase__ : List[str] = csv._generate_tables([[csv_file_with_image]] )
lowerCamelCase__ : Dict = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : List[Any] = f.read().splitlines()[1:]
lowerCamelCase__ : List[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )
lowerCamelCase__ : str = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
    lowerCamelCase__ : List[str] = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 142 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A :
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def __lowerCAmelCase ( cls : str , lowerCAmelCase_ : CommonSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray ) -> str:
"""simple docstring"""
return cls(common=lowerCAmelCase_ , init_noise_sigma=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
@dataclass
class A ( _a ):
lowercase_ = 42
class A ( _a ,_a ):
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return True
@register_to_config
def __init__( self : Tuple , lowerCAmelCase_ : int = 10_00 , lowerCAmelCase_ : float = 0.0_0_0_1 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[jnp.ndarray] = None , lowerCAmelCase_ : str = "fixed_small" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : jnp.dtype = jnp.floataa , ) -> Optional[Any]:
"""simple docstring"""
_a = dtype
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
_a = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_a = jnp.array(1.0 , dtype=self.dtype )
_a = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase_ , init_noise_sigma=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
_a = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_a = (jnp.arange(0 , lowerCAmelCase_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=None ) -> Optional[Any]:
"""simple docstring"""
_a = state.common.alphas_cumprod[t]
_a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_a = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_a = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_a = jnp.clip(lowerCAmelCase_ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_a = jnp.log(jnp.clip(lowerCAmelCase_ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_a = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_a = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_a = variance
_a = state.common.betas[t]
_a = (predicted_variance + 1) / 2
_a = frac * max_log + (1 - frac) * min_log
return variance
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : Optional[jax.random.KeyArray] = None , lowerCAmelCase_ : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
_a = timestep
if key is None:
_a = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_a , _a = jnp.split(lowerCAmelCase_ , sample.shape[1] , axis=1 )
else:
_a = None
# 1. compute alphas, betas
_a = state.common.alphas_cumprod[t]
_a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_a = model_output
elif self.config.prediction_type == "v_prediction":
_a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_a = jnp.clip(lowerCAmelCase_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_a = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_a = jax.random.split(lowerCAmelCase_ , num=1 )
_a = jax.random.normal(lowerCAmelCase_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase_ , lowerCAmelCase_ , predicted_variance=lowerCAmelCase_ ) ** 0.5) * noise
_a = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase_ , state=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : DDPMSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __len__( self : Any ) -> str:
"""simple docstring"""
return self.config.num_train_timesteps
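# For reference, the posterior mean assembled in step() above is formula (7)
# of the DDPM paper (https://arxiv.org/pdf/2006.11239.pdf); in LaTeX:
#
#   \tilde{\mu}_t(x_t, x_0)
#     = \frac{\sqrt{\bar{\alpha}_{t-1}} \beta_t}{1 - \bar{\alpha}_t} x_0
#     + \frac{\sqrt{\alpha_t} (1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t} x_t
#
# matching pred_original_sample_coeff and current_sample_coeff in the code.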
| 179 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A ( _a ,_a ):
lowercase_ = 'nat'
lowercase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : List[Any]=64 , lowerCAmelCase_ : Dict=[3, 4, 6, 5] , lowerCAmelCase_ : Dict=[2, 4, 8, 16] , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Dict=3.0 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : str=1e-5 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(lowerCAmelCase_ )
_a = num_heads
_a = kernel_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(lowerCAmelCase_ ) - 1) )
_a = layer_scale_init_value
_a = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 179 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase = logging.get_logger(__name__)
@dataclass
class A__ :
lowercase = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowercase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowercase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.task_name.lower()
class A__ ( UpperCamelCase__ ):
lowercase = "train"
lowercase = "dev"
lowercase = "test"
class A__ ( UpperCamelCase__ ):
lowercase = 42
lowercase = 42
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = Split.train , UpperCamelCase__ = None , ) -> Dict:
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , UpperCamelCase__ , )
A_ = args
A_ = glue_processors[args.task_name]()
A_ = glue_output_modes[args.task_name]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
try:
A_ = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
A_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
A_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + """.lock"""
with FileLock(UpperCamelCase__ ):
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
A_ = time.time()
A_ = torch.load(UpperCamelCase__ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
A_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A_ = self.processor.get_test_examples(args.data_dir )
else:
A_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A_ = examples[:limit_length]
A_ = glue_convert_examples_to_features(
UpperCamelCase__ , UpperCamelCase__ , max_length=args.max_seq_length , label_list=UpperCamelCase__ , output_mode=self.output_mode , )
A_ = time.time()
torch.save(self.features , UpperCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> str:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCamelCase__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.label_list
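# The caching idiom above in isolation: a FileLock placed next to the cache
# file ensures only one distributed worker builds the features while the
# others wait and then read the cache. A sketch with assumed helper names:
#
# with FileLock(cache_path + ".lock"):
#     if os.path.exists(cache_path) and not overwrite:
#         features = torch.load(cache_path)
#     else:
#         features = build_features(...)  # hypothetical builder
#         torch.save(features, cache_path)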
| 162 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case : Optional[Any] = getattr(lowercase ,lowercase )
if weight_type is not None:
snake_case : Any = getattr(lowercase ,lowercase ).shape
else:
snake_case : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : str = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : List[str] = value
elif weight_type == "bias":
snake_case : int = value
else:
snake_case : str = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Any]:
snake_case : Optional[Any] = []
snake_case : Optional[Any] = fairseq_model.state_dict()
snake_case : Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase ,lowercase ,lowercase ,lowercase ,hf_model.config.feat_extract_norm == """group""" ,)
snake_case : Any = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Optional[int] = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case : Union[str, Any] = True
if "*" in mapped_key:
snake_case : Dict = name.split(lowercase )[0].split(""".""" )[-2]
snake_case : str = mapped_key.replace("""*""" ,lowercase )
if "weight_g" in name:
snake_case : int = """weight_g"""
elif "weight_v" in name:
snake_case : Optional[int] = """weight_v"""
elif "weight" in name:
snake_case : Tuple = """weight"""
elif "bias" in name:
snake_case : List[Any] = """bias"""
else:
snake_case : List[str] = None
set_recursively(lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Dict:
snake_case : str = full_name.split("""conv_layers.""" )[-1]
snake_case : Dict = name.split(""".""" )
snake_case : Any = int(items[0] )
snake_case : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=None ,lowercase=None ,lowercase=True ) -> Union[str, Any]:
if config_path is not None:
snake_case : Optional[int] = HubertConfig.from_pretrained(lowercase )
else:
snake_case : Tuple = HubertConfig()
if is_finetuned:
if dict_path:
snake_case : List[str] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case : Optional[int] = target_dict.pad_index
snake_case : Any = target_dict.bos_index
snake_case : Dict = target_dict.eos_index
snake_case : List[str] = len(target_dict.symbols )
snake_case : Union[str, Any] = os.path.join(lowercase ,"""vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase ,exist_ok=lowercase )
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase )
snake_case : Union[str, Any] = WavaVecaCTCTokenizer(
lowercase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=lowercase ,)
snake_case : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
snake_case : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=lowercase ,return_attention_mask=lowercase ,)
snake_case : Dict = WavaVecaProcessor(feature_extractor=lowercase ,tokenizer=lowercase )
processor.save_pretrained(lowercase )
snake_case : List[Any] = HubertForCTC(lowercase )
else:
snake_case : Any = HubertModel(lowercase )
if is_finetuned:
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case : Any = model[0].eval()
recursively_load_weights(lowercase ,lowercase ,lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCamelCase : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
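# Simplified form of the dotted-path traversal used by set_recursively above:
# walk the parent attributes with getattr, then assign at the leaf.
def set_by_path(root, dotted_key, value):
    *parents, leaf = dotted_key.split(".")
    for name in parents:
        root = getattr(root, name)
    setattr(root, leaf, value)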
| 124 | 0 |
def __lowerCamelCase (number_of_steps : int ):
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), F"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
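# The loop above is the O(n) Fibonacci-style recurrence for counting ways to
# climb n steps by 1 or 2 at a time; small cross-checks:
# __lowerCamelCase(1) == 1, __lowerCamelCase(2) == 2, __lowerCamelCase(5) == 8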
| 367 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
def __snake_case( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"do_convert_rgb": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase )
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCamelCase )
def __snake_case( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=_UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(_UpperCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processor(images=_UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE = "Alexandra,T-shirt的价格是15便士。"
SCREAMING_SNAKE_CASE = processor(text=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE = "Alexandra,T-shirt的价格是15便士。"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def __snake_case( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __snake_case( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE = "Alexandra,T-shirt的价格是15便士。"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 206 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _a ( unittest.TestCase ):
def A ( self : List[str] ):
'''simple docstring'''
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : List[str] ):
'''simple docstring'''
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : List[Any] ):
'''simple docstring'''
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def A ( self : Union[str, Any] ):
'''simple docstring'''
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
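# A minimal usage sketch of the helper under test (nothing beyond the imports
# already present is assumed):
#
#     act = get_activation("gelu")
#     y = act(torch.randn(2, 3))   # applied element-wise, shape preserved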
| 34 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args (unknown_args ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
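# Example (hypothetical flags): parse_unknown_args(["--num_proc", "8", "--cache_dir", "/tmp"])
# returns {"num_proc": "8", "cache_dir": "/tmp"}; note that values stay strings.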
def main ():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
service.run()
if __name__ == "__main__":
main()
| 34 | 1 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCamelCase :
'''simple docstring'''
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A__ : str =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : int =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Union[str, Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Dict =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : Union[str, Any] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : str =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Dict =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Optional[int] =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : int =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A__ : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : Tuple =self.get_dummy_components()
A__ : str =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[int] =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : str =inputs["""prompt"""]
A__ : Optional[int] =inputs["""generator"""]
A__ : Optional[Any] =inputs["""num_inference_steps"""]
A__ : Union[str, Any] =inputs["""output_type"""]
if "image" in inputs:
A__ : Union[str, Any] =inputs["""image"""]
else:
A__ : List[Any] =None
if "mask_image" in inputs:
A__ : Union[str, Any] =inputs["""mask_image"""]
else:
A__ : Tuple =None
if "original_image" in inputs:
A__ : Optional[Any] =inputs["""original_image"""]
else:
A__ : Tuple =None
A__ , A__ : Optional[Any] =pipe.encode_prompt(lowerCAmelCase_ )
# inputs with prompt converted to embeddings
A__ : Optional[int] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : Tuple =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : int =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) is None , f"`{optional_component}` did not stay set to None after loading." , )
A__ : Dict =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : int =inputs["""generator"""]
A__ : str =inputs["""num_inference_steps"""]
A__ : Optional[Any] =inputs["""output_type"""]
# inputs with prompt converted to embeddings
A__ : List[Any] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : int =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
A__ : List[str] =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Union[str, Any] =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =self.get_dummy_components()
A__ : int =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : List[Any] =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : List[Any] =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : Tuple =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Tuple =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
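    # Note on the two round-trip tests above: AttnAddedKVProcessor is re-set after
    # every from_pretrained() call because the default attention processors are not
    # guaranteed to reproduce bit-identical outputs, so equality is only asserted
    # up to max |a - b| < 1e-4.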
| 136 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size ( n , ffn_dim_multiplier=1 , multiple_of=256 ) -> int:
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
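# Worked example: for the 7B model n = 4096, so int(8 * 4096 / 3) = 10922, and
# rounding up to the next multiple of 256 gives 43 * 256 = 11008, matching the
# "7B" entry in the size map above.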
def read_json ( path ):
    """simple docstring"""
    with open(path , """r""" ) as f:
        return json.load(f )
def write_json ( text , path ):
    """simple docstring"""
    with open(path , """w""" ) as f:
        json.dump(text , f )
def write_model ( model_path , input_base_path , model_size , safe_serialization=True ):
"""simple docstring"""
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : List[Any] =os.path.join(__snake_case, """tmp""" )
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : Dict =read_json(os.path.join(__snake_case, """params.json""" ) )
A__ : Dict =NUM_SHARDS[model_size]
A__ : List[str] =params["""n_layers"""]
A__ : int =params["""n_heads"""]
A__ : str =n_heads // num_shards
A__ : Tuple =params["""dim"""]
A__ : Union[str, Any] =dim // n_heads
A__ : str =1_00_00.0
A__ : Any =1.0 / (base ** (torch.arange(0, __snake_case, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ : Optional[Any] =params["""n_kv_heads"""] # for GQA / MQA
A__ : int =n_heads_per_shard // num_key_value_heads
A__ : int =dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ : List[Any] =n_heads
A__ : List[str] =n_heads_per_shard
A__ : Dict =dim
# permute for sliced rotary
    def permute (w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
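    # Sketch of the effect (hypothetical 4-row head): rows [q0, q1, q2, q3] in the
    # interleaved fairseq rotary layout become [q0, q2, q1, q3], i.e. even-indexed
    # pair members first, which is the half-rotated layout HF Llama attention expects.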
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ : List[str] =torch.load(os.path.join(__snake_case, """consolidated.00.pth""" ), map_location="""cpu""" )
else:
# Sharded
A__ : Optional[Any] =[
torch.load(os.path.join(__snake_case, f"consolidated.{i:02d}.pth" ), map_location="""cpu""" )
for i in range(__snake_case )
]
A__ : Optional[Any] =0
A__ : str ={"""weight_map""": {}}
for layer_i in range(__snake_case ):
A__ : Dict =f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Dict ={
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ : Any ={
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ : Optional[Any] =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ) )
A__ : int =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ), __snake_case, __snake_case, __snake_case, )
A__ : int =torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(__snake_case )], dim=1 )
A__ : Optional[int] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__snake_case )], dim=0 )
A__ : str =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__snake_case )], dim=1 )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__snake_case )], dim=0 )
A__ : List[Any] =inv_freq
for k, v in state_dict.items():
A__ : Optional[Any] =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
A__ : Tuple =f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Tuple ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
A__ : Any ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__snake_case )], dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__snake_case )], dim=0 ),
}
for k, v in state_dict.items():
A__ : int =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
# Write configs
A__ : Union[str, Any] ={"""total_size""": param_count * 2}
write_json(__snake_case, os.path.join(__snake_case, """pytorch_model.bin.index.json""" ) )
A__ : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
A__ : List[Any] =params["""multiple_of"""] if """multiple_of""" in params else 256
A__ : int =LlamaConfig(
hidden_size=__snake_case, intermediate_size=compute_intermediate_size(__snake_case, __snake_case, __snake_case ), num_attention_heads=params["""n_heads"""], num_hidden_layers=params["""n_layers"""], rms_norm_eps=params["""norm_eps"""], num_key_value_heads=__snake_case, )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
A__ : List[Any] =LlamaForCausalLM.from_pretrained(__snake_case, torch_dtype=torch.floataa, low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__snake_case, safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def write_tokenizer ( tokenizer_path , input_tokenizer_path ):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main ():
"""simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""", help="""Location of LLaMA weights, which contains tokenizer.model and model folders""", )
parser.add_argument(
"""--model_size""", choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""], )
parser.add_argument(
"""--output_dir""", help="""Location to write HF model and tokenizer""", )
parser.add_argument("""--safe_serialization""", type=__snake_case, help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, """tokenizer.model""" )
    write_tokenizer(args.output_dir, spm_path )
if __name__ == "__main__":
main()
| 136 | 1 |
"""simple docstring"""
import qiskit
def single_qubit_measure (qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    aer_sim = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , aer_sim , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
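# Because both qubits are deterministically flipped to |1> before measurement,
# the ideal aer_simulator returns {"11": 1000} for the default 1000 shots.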
if __name__ == "__main__":
lowerCamelCase_ = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}') | 191 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main () -> None:
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main() | 191 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( lowerCAmelCase__):
    def __init__( self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self : Tuple ):
return len(self.dataset )
    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class _UpperCAmelCase ( lowerCAmelCase__):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self : List[str] ):
return len(self.loader )
def __iter__( self : List[str] ):
        self.iterator = iter(self.loader )
return self
    def loader_batch_item( self ):
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that gets passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
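    # Unrolling sketch (hypothetical shapes): one DataLoader batch of size 4 costs
    # a single forward pass, after which four consecutive __next__ calls each
    # return a batch_size=1 view of it (e.g. processed[i].unsqueeze(0)) before the
    # next batch is pulled from the loader.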
class _UpperCAmelCase ( lowerCAmelCase__):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
def __iter__( self : Optional[Any] ):
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__( self ):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator keeps feeding until ALL elements of the outer iterator
            # have created their subiterator and been iterated through.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
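    # The flattening described above is equivalent to this generator sketch
    # (illustrative only):
    #
    #     def flatten(outer):
    #         for inner in outer:
    #             yield from inner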
class _UpperCAmelCase ( lowerCAmelCase__):
def __iter__( self : Any ):
        self.iterator = iter(self.loader )
return self
    def __next__( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT we have an extra required item, which is the presence of `is_last`.
        # Because everything is flattened by `PipelineChunkIterator`, we need to
        # keep track of how to regroup here in the original `process` boundaries
        # so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch, so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('''is_last''' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
        return accumulator
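    # Regrouping sketch (hypothetical stream): given flattened items
    # [a1, a2(is_last), b1(is_last)], this iterator yields [a1, a2] and then [b1],
    # restoring the original `process` boundaries for `postprocess`.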
class _UpperCAmelCase ( lowerCAmelCase__):
    def __init__( self , dataset : Dataset , key : str ):
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return self.dataset[i][self.key]
class _UpperCAmelCase ( lowerCAmelCase__):
    def __init__( self , dataset : Dataset , key1 : str , key2 : str ):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 155 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCAmelCase ( unittest.TestCase):
_lowerCAmelCase : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCAmelCase : Union[str, Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _snake_case ( self : Any ):
snake_case_ : Dict = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
snake_case_ : List[str] = text_generator('''This is a test''' , do_sample=lowercase_ )
self.assertEqual(
lowercase_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
snake_case_ : Tuple = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
lowercase_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
snake_case_ : int = text_generator('''This is a test''' , do_sample=lowercase_ , num_return_sequences=2 , return_tensors=lowercase_ )
self.assertEqual(
lowercase_ , [
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
] , )
snake_case_ : Tuple = text_generator.model.config.eos_token_id
snake_case_ : Any = '''<pad>'''
snake_case_ : Optional[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=lowercase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase_ , )
self.assertEqual(
lowercase_ , [
[
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
],
[
{'''generated_token_ids''': ANY(lowercase_ )},
{'''generated_token_ids''': ANY(lowercase_ )},
],
] , )
@require_tf
def _snake_case ( self : Any ):
snake_case_ : List[str] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
snake_case_ : List[Any] = text_generator('''This is a test''' , do_sample=lowercase_ )
self.assertEqual(
lowercase_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
snake_case_ : Tuple = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=lowercase_ )
self.assertEqual(
lowercase_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def _snake_case ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int ):
snake_case_ : str = TextGenerationPipeline(model=lowercase_ , tokenizer=lowercase_ )
return text_generator, ["This is a test", "Another test"]
def _snake_case ( self : Any ):
snake_case_ : int = '''Hello I believe in'''
snake_case_ : Dict = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ : Optional[Any] = text_generator(lowercase_ )
self.assertEqual(
lowercase_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
snake_case_ : Any = text_generator(lowercase_ , stop_sequence=''' fe''' )
self.assertEqual(lowercase_ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def _snake_case ( self : Optional[int] , lowercase_ : str , lowercase_ : List[Any] ):
snake_case_ : Any = text_generator.model
snake_case_ : str = text_generator.tokenizer
snake_case_ : Tuple = text_generator('''This is a test''' )
self.assertEqual(lowercase_ , [{'''generated_text''': ANY(lowercase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
snake_case_ : Any = text_generator('''This is a test''' , return_full_text=lowercase_ )
self.assertEqual(lowercase_ , [{'''generated_text''': ANY(lowercase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
snake_case_ : Optional[Any] = pipeline(task='''text-generation''' , model=lowercase_ , tokenizer=lowercase_ , return_full_text=lowercase_ )
snake_case_ : str = text_generator('''This is a test''' )
self.assertEqual(lowercase_ , [{'''generated_text''': ANY(lowercase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
snake_case_ : List[str] = text_generator('''This is a test''' , return_full_text=lowercase_ )
self.assertEqual(lowercase_ , [{'''generated_text''': ANY(lowercase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
snake_case_ : List[Any] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=lowercase_ )
self.assertEqual(
lowercase_ , [
[{'''generated_text''': ANY(lowercase_ )}, {'''generated_text''': ANY(lowercase_ )}],
[{'''generated_text''': ANY(lowercase_ )}, {'''generated_text''': ANY(lowercase_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ : List[Any] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase_ )
self.assertEqual(
lowercase_ , [
[{'''generated_text''': ANY(lowercase_ )}, {'''generated_text''': ANY(lowercase_ )}],
[{'''generated_text''': ANY(lowercase_ )}, {'''generated_text''': ANY(lowercase_ )}],
] , )
with self.assertRaises(lowercase_ ):
snake_case_ : int = text_generator('''test''' , return_full_text=lowercase_ , return_text=lowercase_ )
with self.assertRaises(lowercase_ ):
snake_case_ : Dict = text_generator('''test''' , return_full_text=lowercase_ , return_tensors=lowercase_ )
with self.assertRaises(lowercase_ ):
snake_case_ : Dict = text_generator('''test''' , return_text=lowercase_ , return_tensors=lowercase_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ : str = text_generator('''''' )
self.assertEqual(lowercase_ , [{'''generated_text''': ANY(lowercase_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ : List[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
snake_case_ : Tuple = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowercase_ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _snake_case ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
snake_case_ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ : Tuple = pipe('''This is a test''' )
self.assertEqual(
lowercase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
snake_case_ : Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ : Optional[Any] = pipe('''This is a test''' )
self.assertEqual(
lowercase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ : int = pipe('''This is a test''' )
self.assertEqual(
lowercase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def _snake_case ( self : List[str] ):
import torch
snake_case_ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def _snake_case ( self : Dict ):
import torch
snake_case_ : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=lowercase_ , top_p=0.5 )
def _snake_case ( self : int ):
snake_case_ : int = '''Hello world'''
snake_case_ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
snake_case_ : Optional[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
snake_case_ : Dict = logging.get_logger('''transformers.generation.utils''' )
        snake_case_ : Tuple = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowercase_ ) as cl:
snake_case_ : List[Any] = text_generator(lowercase_ , max_length=10 , max_new_tokens=1 )
self.assertIn(lowercase_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowercase_ ) as cl:
snake_case_ : int = text_generator(lowercase_ , max_new_tokens=1 )
self.assertNotIn(lowercase_ , cl.out )
with CaptureLogger(lowercase_ ) as cl:
snake_case_ : Optional[Any] = text_generator(lowercase_ , max_length=10 )
self.assertNotIn(lowercase_ , cl.out )
| 155 | 1 |
"""simple docstring"""
def print_pascal_triangle (num_rows ):
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def generate_pascal_triangle (num_rows ):
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row (triangle , current_row_idx ):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element (triangle , current_row , current_row_idx , current_col_idx ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
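# Example: with triangle == [[1], [1, 1]] already built, populating row index 2
# yields [1, 2, 1], since 1 + 1 = 2 at column index 1.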
def generate_pascal_triangle_optimized (num_rows ):
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
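# The optimized variant computes only the first half of each row and mirrors it,
# using the symmetry C(n, k) == C(n, n - k); e.g. [1, 4, 6] is mirrored into
# [1, 4, 6, 4, 1] for the fifth row.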
def benchmark ():
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='import __main__' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"""{call:38} -- {timing:.4f} seconds""" )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
    print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 78 |
import cmath
import math
def apparent_power ( voltage : float , current : float , voltage_angle : float , current_angle : float ):
    """simple docstring"""
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
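# Worked example (illustrative values): apparent_power(100, 5, 0, 0) returns
# (500+0j) volt-amperes, while apparent_power(100, 5, 90, 0) returns roughly
# (0+500j), since the 90-degree voltage angle rotates the product onto the
# imaginary axis.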
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
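# Usage sketch of the __call__ contract above (checkpoint id is illustrative):
# text-only input returns tokenizer features, image-only input returns
# pixel_values, and passing both merges them:
#
#     out = processor(text=["a photo"], images=[img], return_tensors="pt")
#     # out has input_ids, attention_mask and pixel_values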
| 356 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=DummyObject ):
"""simple docstring"""
lowercase = ['note_seq']
def __init__( self : Tuple , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Any:
requires_backends(self , ["""note_seq"""] )
@classmethod
def __lowercase ( cls : Optional[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Dict ) -> List[Any]:
requires_backends(cls , ["""note_seq"""] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ["""note_seq"""] )
| 89 | 0 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = '''The dog is cute and lives in the garden house'''
lowercase__ : int = jnp.array([tokenizer.encode(_snake_case )] )
lowercase__ : Any = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowercase__ : Optional[Any] = model(_snake_case )['''last_hidden_state''']
self.assertEqual(output.shape ,_snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,_snake_case ,atol=1e-3 ) )
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
def max_product_subarray(numbers):
    """
    Returns the maximum product obtainable from a contiguous subarray of
    `numbers`, tracking both the running maximum and minimum products so that
    negative numbers are handled correctly.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
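
if __name__ == "__main__":
    # Illustrative check (not part of the original file): the maximum product
    # subarray of [2, 3, -2, 4] is [2, 3], so this prints 6; [-2, 0, -1] prints 0.
    print(max_product_subarray([2, 3, -2, 4]))
    print(max_product_subarray([-2, 0, -1]))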
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase_ :Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
UpperCAmelCase_ :int = PipelineTesterMixin.required_optional_params - {"latents"}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , __A , __A=0 ) -> Optional[int]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[int] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Any = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __lowerCAmelCase ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Dict:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
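
# Illustrative launch commands (not part of the original script; the file name is
# an assumption about where this example is saved):
#
#     python local_sgd.py --cpu
#     accelerate launch local_sgd.py --mixed_precision fp16 --local_sgd_steps 8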
"""simple docstring"""
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[Any] = [0] * len(__UpperCamelCase )
snake_case_ :int = []
snake_case_ :List[Any] = []
snake_case_ :int = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ :Tuple = queue.pop(0 )
cnt += 1
topo.append(__UpperCamelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
if cnt != len(__UpperCamelCase ):
print("""Cycle exists""" )
else:
print(__UpperCamelCase )
# Adjacency List of Graph
__a = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Any = n
__lowercase : Any = [None] * self.n
__lowercase : Optional[int] = 0 # index of the first element
__lowercase : Optional[int] = 0
__lowercase : Any = 0
def __len__( self ) -> int:
return self.size
def _lowerCamelCase ( self ) -> bool:
return self.size == 0
def _lowerCamelCase ( self ) -> Optional[Any]:
return False if self.is_empty() else self.array[self.front]
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
__lowercase : Any = data
__lowercase : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def _lowerCamelCase ( self ) -> List[Any]:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
__lowercase : Any = self.array[self.front]
__lowercase : int = None
__lowercase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
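
# Illustrative usage (not part of the original file): `enqueue` returns `self`,
# so calls can be chained.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20)
    print(len(queue))       # 2
    print(queue.first())    # 10
    print(queue.dequeue())  # 10
    print(queue.dequeue())  # 20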
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = AutoModelForSeqaSeqLM.from_config(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
AutoTokenizer.from_pretrained(UpperCAmelCase ).save_pretrained(UpperCAmelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
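
# Illustrative CLI usage via python-fire (not part of the original file; the script
# name and the `--d_model` override are examples only):
#
#     python save_randomly_initialized.py t5-small ./t5-small-random --d_model 64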
| 355 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
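
# Illustrative usage (not part of the original file; `from_pretrained` requires
# network access to the Hugging Face Hub):
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer.build_inputs_with_special_tokens([5, 6, 7])
#     # -> [cls_token_id, 5, 6, 7, sep_token_id]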
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """
    Sorts a list of 0s, 1s and 2s in a single pass
    (Dijkstra's Dutch national flag / three-way partitioning).
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __snake_case (_a ):
lowerCAmelCase__ = "ibert"
def __init__( self : int , _UpperCAmelCase : Optional[int]=3_0522 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : str=1E-12 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any="none" , **_UpperCAmelCase : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : str = position_embedding_type
_lowerCAmelCase : int = quant_mode
_lowerCAmelCase : str = force_dequant
class __snake_case (OnnxConfig ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
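# Minimal usage sketch (hedged - upstream transformers exports these two classes
# as IBertConfig and IBertOnnxConfig):
#   config = IBertConfig(quant_mode=True)     # enable integer-only quantization mode
#   onnx_config = IBertOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict of dynamic batch/sequence axes for ONNX export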
| 159 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    # Parameter and variable names are restored from how each one is used below;
    # the function name itself is descriptive rather than taken from this snippet.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
    import doctest

    doctest.testmod()
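    # Worked example (values chosen here for illustration): items worth
    # [60, 100, 120] weighing [10, 20, 30] with capacity 50 have ratios
    # [6, 5, 4]; the greedy rule takes items 0 and 1 whole plus 2/3 of item 2:
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1, 1, 0.666...])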
| 14 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None  # attribute name assumed; only the None value is certain from the snippet
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self) -> Optional[int]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        # use_cache value assumed False, as in the upstream Flax GPT-J tester
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim)
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask) -> Tuple:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids)
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids)
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask) -> Dict:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids)
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids)
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""")
@require_flax
class __A (FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self) -> List[Any]:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self) -> Any:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
@tooslow
    def test_batch_generation(self) -> Optional[Any]:
        """simple docstring"""
        tokenizer = GPTaTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self) -> str:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self) -> str:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)
@tooslow
    def test_model_from_pretrained(self) -> str:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
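# Note: the cache checks above exercise Flax incremental decoding - init_cache
# pre-allocates key/value states, the model is stepped once over the prefix and
# once over the final token, and the result must match a full forward pass to
# within 1e-3. They can be run with, e.g. (command assumed):
#   pytest tests/models/gptj/test_modeling_flax_gptj.py -k use_cache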
| 347 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCAmelCase__ ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase__ = "marian"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58_101, decoder_vocab_size=None, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58_100, scale_embedding=False, pad_token_id=58_100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs) -> Optional[Any]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
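# Quick usage sketch (hedged - upstream exports the class above as MarianConfig):
#   config = MarianConfig()                  # Helsinki-NLP opus-mt defaults
#   config.num_attention_heads  # -> 16, resolved via the attribute_map above
#   config.hidden_size          # -> 1024 (d_model), likewise via attribute_map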
class lowerCAmelCase__ ( OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
__SCREAMING_SNAKE_CASE = common_inputs["""decoder_input_ids"""].shape[1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 )
__SCREAMING_SNAKE_CASE = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers
__SCREAMING_SNAKE_CASE = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
__SCREAMING_SNAKE_CASE = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ) -> Tuple:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1E-4
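# Export sketch (hedged - MarianMTModel/MarianTokenizer/MarianOnnxConfig are the
# upstream export names, not defined in this file):
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))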
| 331 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = AltDiffusionPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_002)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77  # attribute assumed from the upstream test
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0) -> List[str]:
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_attention_slicing_forward_pass(self) -> Tuple:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical(self) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_alt_diffusion_ddim(self) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_pndm(self) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion(self) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_fast_ddim(self) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
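# Note: the assertions above follow the standard diffusers testing pattern -
# fix every seed, take a small corner slice of the generated image,
#   image_slice = image[0, -3:, -3:, -1]
# and compare it to stored reference values with an np.abs(...).max() < 1e-2
# tolerance, which catches regressions without shipping full reference images.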
| 331 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head')
        key = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head')
        key = key.replace('heads.cmd.itm_head.cls', 'itm_head')
        key = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler')
        key = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale')
        key = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head')
        key = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head')
        key = key.replace('mm_text_projection', 'flava.text_to_mm_projection')
        key = key.replace('mm_image_projection', 'flava.image_to_mm_projection')
        key = key.replace('image_encoder.module', 'flava.image_model')
        key = key.replace('text_encoder.module', 'flava.text_model')
        key = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token')
        key = key.replace('mm_encoder.module', 'flava.multimodal_model')
        key = key.replace('text_projection', 'flava.text_projection')
        key = key.replace('image_projection', 'flava.image_projection')
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        # prefix assumed from the upstream conversion script
        upgrade[f'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    # second argument / flag values assumed from the upstream script
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='cpu')
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='cpu')
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowercase__ = parser.parse_args()
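    # Example invocation (all paths are placeholders, the script name is assumed):
    #   python convert_flava_original_pytorch_to_hf.py \
    #       --checkpoint_path flava_full.pt --codebook_path flava_codebook.pt \
    #       --pytorch_dump_folder_path ./flava-hf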
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 96 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True) -> Optional[Any]:
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False) -> Dict:
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1E-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator) -> int:
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
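# `Accelerator.no_sync` mirrors DistributedDataParallel.no_sync: inside the
# context, gradients accumulate locally instead of being all-reduced. A minimal
# sketch of the intended call pattern (assuming a prepared `ddp_model`):
#   with accelerator.no_sync(ddp_model):
#       accelerator.backward(loss)   # grads stay local
#   accelerator.backward(loss)       # the next step outside triggers the all-reduce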
def test_distributed_sync(accelerator) -> str:
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False) -> List[str]:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
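# `accelerator.accumulate(model)` wraps the no_sync logic with step counting:
# gradients are only synchronized every `gradient_accumulation_steps` batches or
# on the final batch of the dataloader, which is exactly what the parity asserts
# above verify against the manually averaged ground-truth model.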
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False) -> Union[str, Any]:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration)
    GradientState._reset_state()
def test_dataloader_break() -> List[Any]:
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main() -> str:
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index) -> int:
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
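# This script is meant to be launched once per process; with the accelerate CLI
# that is (file name assumed):
#   accelerate launch test_sync.py
# On a single process it exercises only the DistributedType.NO branches in main().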
| 1 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
lowerCamelCase__ = """▁"""
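# Usage sketch (hedged - upstream exports the class below as T5Tokenizer; a local
# sentencepiece model file is required):
#   tok = TaTokenizer("spiece.model")
#   tok("translate English to German: hi")["input_ids"]  # ends with the </s> id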
class TaTokenizer( PreTrainedTokenizer ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , legacy=True , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool('extra_id' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        if legacy:
            logger.warning_once(
                f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , FutureWarning , )
        return max_model_length
@property
    def vocab_size( self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x : bool(re.search(R'<extra_id_\d+>' , x ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids: List[int] ):
        '''simple docstring'''
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
def __getstate__( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
return state
def __setstate__( self : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : List[str] , a : "TextInput" , **a : int ):
'''simple docstring'''
if not self.legacy:
lowerCAmelCase__ : Any = SPIECE_UNDERLINE + text.replace(a , ' ' )
return super().tokenize(a , **a )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , **a : Tuple ):
'''simple docstring'''
if not self.legacy:
lowerCAmelCase__ : Dict = text.startswith(a )
if is_first:
lowerCAmelCase__ : Union[str, Any] = text[1:]
lowerCAmelCase__ : Tuple = self.sp_model.encode(a , out_type=a )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(a ):
lowerCAmelCase__ : Union[str, Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _lowerCamelCase ( self : List[Any] , a : List[str] ):
'''simple docstring'''
if token.startswith('<extra_id_' ):
lowerCAmelCase__ : Any = re.match(R'<extra_id_(\d+)>' , a )
lowerCAmelCase__ : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a )
def _lowerCamelCase ( self : List[str] , a : int ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowerCAmelCase__ : int = self.sp_model.IdToPiece(a )
else:
lowerCAmelCase__ : Optional[int] = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _lowerCamelCase ( self : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : str = ''
lowerCAmelCase__ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(a )
lowerCAmelCase__ : Tuple = False
out_string += self.sp_model.decode(a )
return out_string.strip()
def _lowerCamelCase ( self : Union[str, Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,) | 363 |
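# A minimal standalone sketch (illustrative, not part of the tokenizer file above) of the
# sentinel-token arithmetic implemented by _convert_token_to_id/_convert_id_to_token:
# `<extra_id_N>` tokens occupy the end of the vocabulary in reverse order, so `<extra_id_0>`
# gets the highest id. The vocab sizes below are assumed purely for illustration.
def sentinel_token_to_id(token_num: int, sp_vocab_size: int = 32000, extra_ids: int = 100) -> int:
    vocab_size = sp_vocab_size + extra_ids  # mirrors the vocab_size property above
    return vocab_size - token_num - 1

assert sentinel_token_to_id(0) == 32099   # <extra_id_0> -> last id in the vocabulary
assert sentinel_token_to_id(99) == 32000  # <extra_id_99> -> first of the appended ids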
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            'sample_size': 32,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            'sample_size': 64,
            'layers_per_block': 1,
            'down_block_types': ('ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D'),
            'up_block_types': ('ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D'),
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'in_channels': 6,
            'out_channels': 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range', prediction_type='epsilon', num_train_timesteps=1000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='epsilon', num_train_timesteps=1000, )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            'decoder': decoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_proj': text_proj,
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder,
            'super_res_first': super_res_first,
            'super_res_last': super_res_last,
            'decoder_scheduler': decoder_scheduler,
            'super_res_scheduler': super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            'image': input_image,
            'generator': generator,
            'decoder_num_inference_steps': 2,
            'super_res_num_inference_steps': 2,
            'output_type': 'np',
        }
    def test_unclip_image_variation_input_tensor(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs['image'] = [
            pipeline_inputs['image'],
            pipeline_inputs['image'],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs['image'] = [
            tuple_pipeline_inputs['image'],
            tuple_pipeline_inputs['image'],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device('cpu')

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop('image')
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents,
            image_embeddings=image_embeddings, ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        if torch_device == 'mps':
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy')
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
 | 307 | 0 |
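# A small sketch of the device-aware seeding pattern used in get_dummy_inputs above
# (illustrative; names are not from the test suite). Per-device torch.Generator objects
# have historically not been supported on the MPS backend, hence the global-seed fallback.
import torch

def make_generator(device, seed=0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)  # seeds the global RNG instead
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator('cpu', seed=0)
noise = torch.randn(2, 3, generator=gen)  # reproducible for a fixed seed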
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, is_hybrid=self.is_hybrid,
            backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == 'DPTForDepthEstimation':
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == 'DPTForDepthEstimation':
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == 'DPTViTHybridEmbeddings':
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized', )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 259 |
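# A hedged sketch (not part of the test above) of the usual DPT post-processing step:
# the (1, 384, 384) predicted depth map is typically resized back to the original image
# size with bicubic interpolation before visualization. The target size is assumed.
import torch

predicted_depth = torch.rand(1, 384, 384)  # stand-in for outputs.predicted_depth
resized = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),          # add a channel dim: (1, 1, 384, 384)
    size=(480, 640),                       # assumed original (height, width)
    mode='bicubic',
    align_corners=False,
).squeeze(1)                               # back to (1, 480, 640)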
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 259 | 1 |
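# A worked check for the classic CLRS instance used in main() above: for dimension array
# [30, 35, 15, 5, 10, 20, 25] (matrices A1..A6), the minimum number of scalar
# multiplications is 15125, achieved by ((A1 (A2 A3)) ((A4 A5) A6)).
matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125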
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : int = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
lowercase__ : Tuple = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : str = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowercase__ : List[str] = corpus_without_punctuation.split('''\n''' )
lowercase__ : List[str] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__a ))
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
return round(tf * idf , 3 )
| 350 |
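# A short usage sketch for the TF-IDF helpers above; the numbers follow directly from the
# definitions (term_frequency is case-insensitive, the idf uses log base 10).
document = 'The quick brown fox jumped over the lazy dog'
assert term_frequency('the', document) == 2

corpus = 'doc one mentions python\ndoc two mentions java\ndoc three mentions python'
df, n = document_frequency('python', corpus)  # (2, 3)
idf = inverse_document_frequency(df, n)       # round(log10(3 / 2), 3) == 0.176
assert tf_idf(2, idf) == 0.352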
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank, training_args.device, training_args.n_gpu,
        bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir,
            tokenizer=tokenizer, labels=labels, model_type=config.model_type,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache,
            mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 302 | 0 |
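# A minimal, self-contained illustration (not part of the training script) of what
# align_predictions above does: argmax over the label axis, then drop positions whose gold
# label equals the CrossEntropyLoss ignore_index (-100) before mapping ids to label strings.
import numpy as np

label_map = {0: 'O', 1: 'B-PER'}
predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, -100]])                            # last position is padding
preds = np.argmax(predictions, axis=2)
preds_list = [[label_map[p] for p, l in zip(preds[0], label_ids[0]) if l != -100]]
assert preds_list == [['O', 'B-PER']]  # the padded third token is excluded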
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False,
            initializer_range=self.initializer_range, out_features=self.out_features,
            out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224').to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
 | 212 |
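# A tiny standalone demo of the zero-init check idiom used in tests like test_initialization
# above: with _config_zero_init, every trainable parameter should be exactly 0 or 1, and the
# mean is rounded at 1e-9 resolution so tiny floating-point noise does not fail the check.
import torch

param = torch.zeros(4, 4) + 1e-12  # almost-zero parameter, e.g. after zero init
value = ((param.mean() * 1e9).round() / 1e9).item()
assert value in [0.0, 1.0]         # the 1e-12 noise rounds away at this resolution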
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
 | 233 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is passed as 0."""
    result = namedtuple('result', 'name value')
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
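# Example calls for electric_power above: each call supplies exactly one zero, and the
# function solves P = V * I for the missing quantity.
assert electric_power(voltage=0, current=2, power=5) == ('voltage', 2.5)
assert electric_power(voltage=2, current=2, power=0) == ('power', 4.0)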
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
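# Example call for carrier_concentration above: with mobility set to 0, the function solves
# sigma = n * e * mu for the mobility using the elementary charge constant.
name, value = carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
assert name == 'mobility'  # value == 25 / (100 * 1.6021e-19), roughly 1.56e18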
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 279 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Maximize the total value in the knapsack, allowing fractional items, greedily by value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
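    # Added usage sketch (assuming the fractional_knapsack helper defined above):
    # items are taken greedily by value/weight ratio; the last item may be split.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1, 1, 0.666...])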
| 279 | 1 |
"""simple docstring"""
import os
import posixpath
import shutil  # needed by the shard-moving logic in _prepare_split_single
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order,):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""").where(f"""part_id = {partition_id}""").drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """simple docstring"""

    def __init__(self, df, partition_order=None,):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **kwargs,):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **kwargs,)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, """fs_test""" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, """a""")
            return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, """batch_bytes: long""")
            .agg(pyspark.sql.functions.sum("""batch_bytes""").alias("""sample_bytes"""))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size,):
        import pyspark

        writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == """parquet"""
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["""task_id""", """num_examples""", """num_bytes"""],)
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("""SSSSS""", f"""{shard_id:05d}""").replace("""TTTTT""", f"""{task_id:05d}"""),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["""task_id""", """num_examples""", """num_bytes"""],)
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("""SSSSS""", f"""{shard_id:05d}""").replace("""TTTTT""", f"""{task_id:05d}"""),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["""task_id""", """num_examples""", """num_bytes"""],)
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, """task_id: long, num_examples: long, num_bytes: long""")
            .groupBy("""task_id""")
            .agg(
                pyspark.sql.functions.sum("""num_examples""").alias("""total_num_examples"""),
                pyspark.sql.functions.sum("""num_bytes""").alias("""total_num_bytes"""),
                pyspark.sql.functions.count("""num_bytes""").alias("""num_shards"""),
                pyspark.sql.functions.collect_list("""num_examples""").alias("""shard_lengths"""),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="""arrow""", max_shard_size=None, num_proc=None, **kwargs,):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
        fname = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"""Renaming {total_shards} shards.""")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id,):
                rename(
                    fs,
                    fpath.replace("""SSSSS""", f"""{shard_id:05d}""").replace("""TTTTT""", f"""{task_id:05d}"""),
                    fpath.replace("""TTTTT-SSSSS""", f"""{global_shard_id:05d}""").replace("""NNNNN""", f"""{total_shards:05d}"""),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""", f"""{shard_id:05d}""").replace("""TTTTT""", f"""{task_id:05d}"""),
                fpath.replace(SUFFIX, """"""),
            )
    def _get_examples_iterable_for_split(self, split_generator,):
        return SparkExamplesIterable(self.df)
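
# Added usage sketch (assumptions: pyspark is installed, a local session can be
# created, and the installed `datasets` version exposes `Dataset.from_spark`,
# the public entry point that drives the builder above).
if __name__ == "__main__":
    import datasets
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
    ds = datasets.Dataset.from_spark(df)
    print(ds)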
| 336 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def lowerCamelCase__ ( self ):
super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""")
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 1 |
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
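
# Added sanity check (a sketch, not from the original script): for n = 5 there
# are 16 pairs (a, b) with 2 <= a, b <= 5, but 2**4 == 4**2 == 16 collide, so
# only 15 distinct powers remain.
assert solution(5) == 15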
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 345 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 166 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a__: Optional[int] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ['''pixel_values''']
def __init__( self,__lowerCamelCase = True,__lowerCamelCase = 1 / 255,__lowerCamelCase = True,__lowerCamelCase = 8,**__lowerCamelCase,):
super().__init__(**__lowerCamelCase )
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
A__ = pad_size
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = None,**__lowerCamelCase ):
return rescale(__lowerCamelCase,scale=__lowerCamelCase,data_format=__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = None ):
A__ , A__ = get_image_size(__lowerCamelCase )
A__ = (old_height // size + 1) * size - old_height
A__ = (old_width // size + 1) * size - old_width
return pad(__lowerCamelCase,((0, pad_height), (0, pad_width)),mode='''symmetric''',data_format=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = ChannelDimension.FIRST,**__lowerCamelCase,):
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_pad if do_pad is not None else self.do_pad
A__ = pad_size if pad_size is not None else self.pad_size
A__ = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=__lowerCamelCase,scale=__lowerCamelCase ) for image in images]
if do_pad:
A__ = [self.pad(__lowerCamelCase,size=__lowerCamelCase ) for image in images]
A__ = [to_channel_dimension_format(__lowerCamelCase,__lowerCamelCase ) for image in images]
A__ = {'''pixel_values''': images}
return BatchFeature(data=__lowerCamelCase,tensor_type=__lowerCamelCase )
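
# Added usage sketch (assuming the Swin2SRImageProcessor defined above):
# symmetric padding rounds each spatial side up to the next multiple of
# `pad_size`, which window-based super-resolution models expect.
if __name__ == "__main__":
    processor = Swin2SRImageProcessor(pad_size=8)
    image = np.random.randint(0, 256, (3, 250, 251), dtype=np.uint8)
    batch = processor(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 256, 256)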
| 39 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
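
# Added usage sketch (assuming the entry points above, network access, and the
# public "bert-base-uncased" checkpoint); this mirrors how torch.hub would
# invoke this file.
if __name__ == "__main__":
    tok = tokenizer("bert-base-uncased")
    mdl = model("bert-base-uncased")
    print(mdl(**tok("Hello world", return_tensors="pt")).last_hidden_state.shape)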
| 39 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
def lowercase__ ( self , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case__ ) )
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_dummy_inputs()
lowerCAmelCase : Optional[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Tuple = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_dummy_inputs()
lowerCAmelCase : Any = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : str = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_dummy_inputs()
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Tuple = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
lowerCAmelCase : List[str] = pipe(**snake_case__ ).images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Union[str, Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCAmelCase : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs()
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : int = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = ort.SessionOptions()
lowerCAmelCase : Tuple = False
return options
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase : Optional[int] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[str] = "A fantasy landscape, trending on artstation"
lowerCAmelCase : str = torch.manual_seed(0 )
lowerCAmelCase : Any = pipe(
prompt=snake_case__ , image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : Tuple = output.images
lowerCAmelCase : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase : Tuple = init_image.resize((128, 128) )
lowerCAmelCase : Dict = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = "A fantasy landscape, trending on artstation"
lowerCAmelCase : int = torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = pipe(
prompt=snake_case__ , image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : Any = output.images
lowerCAmelCase : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase : List[str] = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
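
# Added usage sketch (assuming onnxruntime and the public
# "ssube/stable-diffusion-x4-upscaler-onnx" checkpoint exercised by the tests
# above): upscale a 128x128 image by 4x on CPU.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
    )
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((128, 128))
    upscaled = pipe(prompt="a fantasy landscape", image=low_res, num_inference_steps=10).images[0]
    upscaled.save("upscaled.png")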
| 108 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(snake_case__ ) , 1_004 )
def lowercase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Tuple = self.get_tokenizer()
lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé."
lowerCAmelCase : Optional[int] = tokenizer.tokenize(snake_case__ )
lowerCAmelCase : int = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase : int = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ )
lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
        symbols = """Hello World!"""
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowerCAmelCase : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase : int = " ".join(snake_case__ )
lowerCAmelCase : Dict = self.big_tokenizer.encode_plus(snake_case__ , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : str = BigBirdConfig(attention_type="original_full" )
lowerCAmelCase : Any = BigBirdModel(snake_case__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**snake_case__ )
model(**snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowerCAmelCase : Union[str, Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 108 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """simple docstring"""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''', '''by_feature'''))
        examples_path = os.path.abspath('''examples''')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='''main()''' if parser_only else '''training_function()''',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = '''\n'''.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '''''')
                        self.assertEqual(diff, '''''')

    def test_nlp_examples(self):
        """simple docstring"""
        self.one_complete_example('''complete_nlp_example.py''', True)
        self.one_complete_example('''complete_nlp_example.py''', False)

    def test_cv_examples(self):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join('''examples''', '''cv_example.py'''))
        special_strings = [
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''"epoch": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''', True, cv_path, special_strings)
        self.one_complete_example('''complete_cv_example.py''', False, cv_path, special_strings)
@mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, '''default_config.yml''')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        """simple docstring"""
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''epoch_0''')))

    def test_checkpointing_by_steps(self):
        """simple docstring"""
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            '.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''step_2''')))

    def test_load_states_by_epoch(self):
        """simple docstring"""
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n            '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('''epoch 0:''', output)
        self.assertIn('''epoch 1:''', output)

    def test_load_states_by_steps(self):
        """simple docstring"""
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n            '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)
        else:
            self.assertIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)

    @slow
    def test_cross_validation(self):
        """simple docstring"""
        testargs = '''
        examples/by_feature/cross_validation.py
        --num_folds 2
        '''.split()
        with mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''0'''}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('''({.+})''', output)
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['''accuracy'''], 0.7_5)

    def test_multi_process_metrics(self):
        """simple docstring"""
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''})
    def test_tracking(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n                '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''tracking''')))

    def test_gradient_accumulation(self):
        """simple docstring"""
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        """simple docstring"""
        testargs = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs)
| 368 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    '''simple docstring'''
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F'''{k}: {v['duration']}''')
| 179 | 0 |
"""simple docstring"""
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution()) | 96 |
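
# Added worked example (a sketch, not from the original script): the same
# bottom-up accumulation on Project Euler's 4-row sample triangle gives 23
# (the path 3 -> 7 -> 4 -> 9).
def _max_path_demo() -> int:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


assert _max_path_demo() == 23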
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    '''simple docstring'''

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
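
# Added usage sketch (assuming the modules above plus a default CLIP vision
# config; weights are randomly initialized, so only shapes are meaningful).
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    encoder = PaintByExampleImageEncoder(CLIPVisionConfig())
    pixel_values = torch.randn(1, 3, 224, 224)
    latent_states = encoder(pixel_values)
    print(latent_states.shape)  # torch.Size([1, 1, 768])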
UNIT_SYMBOL = {
    """meter""": """m""",
    """kilometer""": """km""",
    """megametre""": """Mm""",
    """gigametre""": """Gm""",
    """terametre""": """Tm""",
    """petametre""": """Pm""",
    """exametre""": """Em""",
    """zettametre""": """Zm""",
    """yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    """m""": 0,
    """km""": 3,
    """Mm""": 6,
    """Gm""": 9,
    """Tm""": 12,
    """Pm""": 15,
    """Em""": 18,
    """Zm""": 21,
    """Ym""": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip("""s""")
    to_sanitized = to_type.lower().strip("""s""")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
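    # Added usage sketch (assuming the length_conversion helper defined above):
    print(length_conversion(4, "meter", "kilometer"))  # 0.004
    print(length_conversion(1, "kilometer", "meter"))  # 1000.0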
| 193 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    '''simple docstring'''

    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,)
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    '''simple docstring'''

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """)
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,)
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
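# Hypothetical wiring for the classes above (a sketch; the worker count and
# checkpoint name are illustrative, not taken from this file):
#
#   ray.init()
#   remote_cls = ray.remote(RayRetriever)
#   workers = [remote_cls.remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-base", actor_handles=workers
#   )
#   retriever.init_retrieval()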
| 193 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
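    # Demo runs (illustrative sample counts; the names follow the repaired
    # functions above):
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)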
| 329 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
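# Standalone sketch of the processor exercised above (the values are the test
# defaults, not canonical ones):
#
#   processor = PoolFormerImageProcessor(
#       size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30}, crop_pct=0.9
#   )
#   pixel_values = processor(images, return_tensors="pt").pixel_values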
| 329 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto'''))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('''bert-base-uncased''')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('''roberta''')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, '''fake-roberta''')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, '''config.json'''), '''w''') as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register('''custom''', CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('''model''', CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('''bert''', BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''bert-base is not a local folder and is not a valid model identifier'''
        ):
            AutoConfig.from_pretrained('''bert-base''')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''
        ):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''',
        ):
            AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''')

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False)

        config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, '''NewModelConfig''')

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, '''NewModelConfig''')

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = '''new-model'''

        try:
            AutoConfig.register('''new-model''', NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''')
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''')
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, '''NewModelConfig''')
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
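# The registration pattern tested above, as a standalone sketch (names are
# illustrative, not part of this test file):
#
#   AutoConfig.register("my-model", MyModelConfig)  # MyModelConfig.model_type == "my-model"
#   config = AutoConfig.for_model("my-model")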
| 52 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond: the upper half followed by the lower half."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
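
# Example: pretty_print(3) prints the 3-row upper pyramid from floyd() and
# then the mirrored lower half from reverse_floyd(), forming a diamond.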
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 52 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares
    of the first n natural numbers (Project Euler 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
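
# Worked example for n = 10: sum of squares = 385, square of sum = 55**2 = 3025,
# so solution(10) == 3025 - 385 == 2640.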
if __name__ == "__main__":
print(f"""{solution() = }""")
| 267 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Smallest spiral side length whose diagonal prime ratio first falls
    below `ratio` (Project Euler 58)."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
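
# Layer geometry behind the loop above (number spiral): the layer with side
# length j + 2 has corners (j + 2)**2 - k * (j + 1) for k in 0..3, and the
# range() call visits the three non-square corners; a spiral of side length j
# carries 2 * j - 1 numbers on its diagonals, which is the denominator used
# in the while-condition.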
if __name__ == "__main__":
import doctest
doctest.testmod() | 190 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
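
    # `attribute_map` aliases `config.hidden_size` to `d_model` and
    # `config.num_attention_heads` to `encoder_attention_heads`; the
    # properties at the bottom of the class expose the same values.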
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=3_00 , __UpperCamelCase=10_24 , __UpperCamelCase=6 , __UpperCamelCase=10_24 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=10_24 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=False , __UpperCamelCase=3_00 , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , __UpperCamelCase=0.25 , __UpperCamelCase=False , **__UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCamelCase : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase : Optional[int] = backbone_config.get("model_type" )
__UpperCamelCase : Any = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase : List[str] = config_class.from_dict(__UpperCamelCase )
__UpperCamelCase : int = use_timm_backbone
__UpperCamelCase : Optional[Any] = backbone_config
__UpperCamelCase : Dict = num_channels
__UpperCamelCase : Optional[int] = num_queries
__UpperCamelCase : str = max_position_embeddings
__UpperCamelCase : Optional[Any] = d_model
__UpperCamelCase : Dict = encoder_ffn_dim
__UpperCamelCase : Tuple = encoder_layers
__UpperCamelCase : Any = encoder_attention_heads
__UpperCamelCase : Any = decoder_ffn_dim
__UpperCamelCase : List[Any] = decoder_layers
__UpperCamelCase : Union[str, Any] = decoder_attention_heads
__UpperCamelCase : List[str] = dropout
__UpperCamelCase : Optional[Any] = attention_dropout
__UpperCamelCase : Optional[int] = activation_dropout
__UpperCamelCase : Tuple = activation_function
__UpperCamelCase : Optional[Any] = init_std
__UpperCamelCase : Union[str, Any] = init_xavier_std
__UpperCamelCase : Any = encoder_layerdrop
__UpperCamelCase : Tuple = auxiliary_loss
__UpperCamelCase : Dict = position_embedding_type
__UpperCamelCase : Union[str, Any] = backbone
__UpperCamelCase : List[str] = use_pretrained_backbone
__UpperCamelCase : int = dilation
# deformable attributes
__UpperCamelCase : Union[str, Any] = num_feature_levels
__UpperCamelCase : Union[str, Any] = encoder_n_points
__UpperCamelCase : int = decoder_n_points
__UpperCamelCase : List[Any] = two_stage
__UpperCamelCase : Dict = two_stage_num_proposals
__UpperCamelCase : List[str] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__UpperCamelCase : Union[str, Any] = class_cost
__UpperCamelCase : Tuple = bbox_cost
__UpperCamelCase : Any = giou_cost
# Loss coefficients
__UpperCamelCase : Dict = mask_loss_coefficient
__UpperCamelCase : int = dice_loss_coefficient
__UpperCamelCase : List[Any] = bbox_loss_coefficient
__UpperCamelCase : Optional[int] = giou_loss_coefficient
__UpperCamelCase : Any = eos_coefficient
__UpperCamelCase : int = focal_alpha
__UpperCamelCase : Union[str, Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 171 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
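
# The mapping above is handed to `_LazyModule` at the bottom of this file, so
# `import transformers.onnx` stays cheap and submodules load on first access.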
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 171 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            # The fused qkv matrix is split into separate query/key/value
            # tensors. The target key names below follow the HF naming that
            # `rename_key` produces (reconstructed; the obfuscated source had
            # lost the left-hand sides of these assignments).
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")
    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_lowerCAmelCase = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
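    # Example invocation (hypothetical output path):
    #
    #   python convert_swin2sr_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64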
| 37 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a bytestring (pure-Python reference)."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
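
# Worked example (digest of b"abc"; matches hashlib.sha256):
#
#   SHA256(b"abc").hash
#   -> 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'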
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main() | 74 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    # Class names restored per transformers.utils.dummy_speech_objects
    # (the obfuscated source had lost them).
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
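
# These dummies raise an informative error at construction time when the
# `speech` extra (torchaudio) is not installed, instead of failing at import.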
| 357 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCamelCase_ ="""bart"""
UpperCamelCase_ =True
@st.cache(allow_output_mutation=_lowercase )
def a_ ( ):
if LOAD_DENSE_INDEX:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Union[str, Any] = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase : str = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : List[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Dict = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase : List[Any] = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def a_ ( _lowercase , _lowercase="wiki40b" , _lowercase="dense" , _lowercase=10 ):
if source == "none":
_UpperCamelCase , _UpperCamelCase : List[str] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase : Dict = query_qa_dense_index(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
_UpperCamelCase , _UpperCamelCase : List[str] = query_es_index(
_lowercase , _lowercase , index_name='''english_wiki40b_snippets_100w''' , n_results=_lowercase , )
_UpperCamelCase : Any = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : List[Any] = '''question: {} context: {}'''.format(_lowercase , _lowercase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is the module-level value set by the UI below.
    return (answer, support_list)
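
# End-to-end sketch of the retrieval + generation loop that the UI below
# drives (question text is illustrative):
#
#   question_doc, support_list = make_support("How do planes fly?", source="wiki40b", method="dense")
#   answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256)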
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
UpperCamelCase_ ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
UpperCamelCase_ ="""
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCamelCase_ ="""
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCamelCase_ =[
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
UpperCamelCase_ =st.sidebar.checkbox("""Demo options""")
if demo_options:
UpperCamelCase_ =st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
UpperCamelCase_ =action_list.index(action_st)
UpperCamelCase_ =st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
UpperCamelCase_ =show_type == """Show full text of passages"""
else:
UpperCamelCase_ =3
UpperCamelCase_ =True
UpperCamelCase_ =st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
UpperCamelCase_ ="""
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
UpperCamelCase_ ="""wiki40b"""
UpperCamelCase_ ="""dense"""
UpperCamelCase_ ="""beam"""
UpperCamelCase_ =2
UpperCamelCase_ =64
UpperCamelCase_ =256
UpperCamelCase_ =None
UpperCamelCase_ =None
UpperCamelCase_ =st.sidebar.checkbox("""Generation options""")
if generate_options:
UpperCamelCase_ ="""
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
UpperCamelCase_ =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
UpperCamelCase_ =st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCamelCase_ =st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCamelCase_ =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCamelCase_ =st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCamelCase_ =st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCamelCase_ =None
# start main text
UpperCamelCase_ =[
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
UpperCamelCase_ =st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCamelCase_ =st.text_input("""Enter your question here:""", """""")
else:
UpperCamelCase_ =question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""dense""", n_results=10)
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""sparse""", n_results=10)
UpperCamelCase_ =[]
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCamelCase_ =support_list[:10]
UpperCamelCase_ ="""<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCamelCase_ , UpperCamelCase_ =answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
UpperCamelCase_ ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
UpperCamelCase_ =res[1].strip()
if sec_titles == "":
UpperCamelCase_ ="""[{}]({})""".format(res[0], wiki_url)
else:
UpperCamelCase_ =sec_titles.split(""" & """)
UpperCamelCase_ =""" & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCamelCase_ =find_nearest_training(question)
UpperCamelCase_ =nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
UpperCamelCase_ =[
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
UpperCamelCase_ ="""
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 128 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-T5 state dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
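# Note on the mapping above: flattened T5X parameter paths are translated to
# Transformers state-dict keys, e.g. for layer 0 encoder self-attention:
#   "encoder/layers_0/attention/key/kernel" -> "encoder.block.0.layer.0.SelfAttention.k.weight"
# The transposes (.T) are needed because Flax stores Dense kernels as
# (in_features, out_features) while torch.nn.Linear weights are (out_features, in_features).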
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise the PyTorch model.
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load the weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save the PyTorch model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
lowercase__ =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
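# A conversion could then be invoked roughly as follows (hypothetical paths,
# shown for illustration only):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output
#
# The dump can afterwards be loaded back with, e.g.,
# T5ForConditionalGeneration.from_pretrained("/path/to/output").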
| 216 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure the custom Jieba pre-tokenizer is set already in __init__.
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a standard one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Serialize with a standard pre-tokenizer, since the custom one cannot be saved.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
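# Minimal usage sketch (assumes the optional `rjieba` dependency of the Jieba
# pre-tokenizer is installed; the checkpoint name is one of the vocab entries above):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # pre-tokenized with jieba before WordPiece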
| 216 | 1 |
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
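# Usage sketch: layers are split into contiguous, evenly sized chunks, one per
# device (illustrative values):
#
#   get_device_map(12, [0, 1, 2, 3])
#   # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}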
| 342 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given electrical
    properties and return the name/value pair of the zero value in a Python dict.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
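# Worked example (values chosen for illustration): for L = 1e-3 H and f = 1e4 Hz,
# the inductive reactance is X_L = 2 * pi * f * L, roughly 62.83 ohms:
#
#   ind_reactance(1e-3, 1e4, 0)  # -> {'reactance': 62.83185...}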
| 342 | 1 |