import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
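# Usage sketch (added for illustration; not part of the upstream test file).
# The get_dummy_components()/get_dummy_inputs() helpers above double as a cheap
# way to smoke-test VideoToVideoSDPipeline locally without downloading weights.
# Wrapped in a function so it has no import-time side effects; instantiating the
# TestCase subclass directly like this is an assumption that holds on recent
# Python versions.
def _demo_run_fast_video_pipeline():
    tester = VideoToVideoSDPipelineFastTests()
    pipe = VideoToVideoSDPipeline(**tester.get_dummy_components()).to("cpu")
    pipe.set_progress_bar_config(disable=True)
    inputs = tester.get_dummy_inputs("cpu")
    inputs["output_type"] = "np"
    frames = pipe(**inputs).frames
    print("first frame shape:", frames[0].shape)  # expected: (32, 32, 3)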
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA (fine-tuned) checkpoint.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
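# Example invocation (added; paths are placeholders, and the script filename is
# assumed to follow the upstream convention):
#
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path /path/to/output
#
# or equivalently from Python:
def _demo_convert_pix2struct():
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        "/path/to/t5x/checkpoint",  # placeholder path
        "/path/to/output",  # placeholder path
        use_large=False,
    )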
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
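# Usage sketch (added for illustration): the config folds the three encoder
# depths into a single `num_hidden_layers` dict keyed by modality, which is
# what downstream LXMERT code reads. A quick check:
def _demo_lxmert_config():
    config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
    print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}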
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
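# Usage sketch (added for illustration): the _LazyModule pattern above makes
# `import transformers.models.trocr` cheap — heavy submodules are only loaded
# on first attribute access. A minimal check of that behavior:
def _demo_lazy_trocr_import():
    import importlib

    trocr = importlib.import_module("transformers.models.trocr")  # fast; config needs no torch
    config_cls = trocr.TrOCRConfig  # attribute access triggers the real submodule import
    return config_cls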
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder only", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
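# Example invocation (added; all paths are placeholders, and the script filename
# is assumed to follow the upstream convention):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output
#
# or equivalently from Python:
def _demo_convert_t5x():
    convert_t5x_checkpoint_to_pytorch(
        "/path/to/t5x/checkpoint_1000000",  # placeholder path
        "/path/to/t5/config.json",  # placeholder path
        "/path/to/output",  # placeholder path
        is_encoder_only=False,
    )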
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
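# Illustration (added for clarity): the rotary trick tested above rotates each
# (even, odd) feature pair of q/k by a position-dependent angle, so the q·k dot
# product depends only on relative position. A minimal NumPy sketch of the same
# idea (simplified; not the exact TFRoFormerSelfAttention tensor layout):
def _demo_rotary_rotation():
    import numpy as np

    seq_len, dim = 4, 8
    pos = np.arange(seq_len)[:, None]  # (seq, 1)
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))  # (dim/2,)
    angles = pos * inv_freq  # (seq, dim/2)
    sin, cos = np.sin(angles), np.cos(angles)

    q = np.random.randn(seq_len, dim)
    q_even, q_odd = q[:, 0::2], q[:, 1::2]
    # rotate each 2-D feature pair by its position-dependent angle
    q_rot = np.empty_like(q)
    q_rot[:, 0::2] = q_even * cos - q_odd * sin
    q_rot[:, 1::2] = q_even * sin + q_odd * cos
    return q_rot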
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
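# Example invocations (added; the output directory is a placeholder and the
# script filename is assumed to follow the upstream convention). The positional
# `fairseq_path` is either a fairseq hub name or a local model.pt:
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large /path/to/output
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart_xsum/model.pt /path/to/output --hf_config facebook/bart-large-xsum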
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector with the basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):  # vector-scalar multiplication
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):  # dot product
            size = len(self)
            summands = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(summands)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        # precondition
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    # precondition
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # precondition
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple matrix supporting addition, subtraction and multiplication."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
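# Usage sketch (added for illustration): a few sanity checks on the Vector and
# Matrix classes above, wrapped in a function to avoid import-time side effects.
def _demo_linear_algebra():
    x = Vector([1, 2, 3])
    y = Vector([1, 1, 1])
    print(x + y)  # (2,3,4)
    print(x * y)  # 6 (dot product)
    print(x.euclidean_length())  # sqrt(14) ≈ 3.7417

    a = Matrix([[2, 0], [0, 2]], 2, 2)
    v = Vector([1, 3])
    print(a * v)  # (2,6)
    print(a.determinant())  # 4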
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
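# Usage sketch (added for illustration): translating with M2M100 hinges on the
# language-token bookkeeping above — the source code is set on the input side
# and the target id is passed as `forced_bos_token_id`. Assumes network access
# to download the checkpoint.
def _demo_m2m100_translation():
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

    inputs = tokenizer("La vie est belle.", return_tensors="pt")
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))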
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_A = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def _UpperCAmelCase ( ):
__UpperCamelCase =_ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__UpperCamelCase =get_sagemaker_input()
else:
__UpperCamelCase =get_cluster_input()
return config
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('config' , description=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate config command' , description=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment variable `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =get_user_input()
if args.config_file is not None:
__UpperCamelCase =args.config_file
else:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(SCREAMING_SNAKE_CASE__ )
else:
config.to_yaml_file(SCREAMING_SNAKE_CASE__ )
print(F'accelerate configuration saved at {config_file}' )
def _UpperCAmelCase ( ):
__UpperCamelCase =config_command_parser()
__UpperCamelCase =parser.parse_args()
config_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
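    # Re-index a PoolFormer block key: subtract the stage offset from the block number and swap the original layer name for its HF counterpart.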
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
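    # Walk the original state dict and translate every checkpoint key to the HF PoolFormer naming scheme.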
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 682 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_A = logging.get_logger(__name__)
_A = Dict[str, Any]
_A = List[Prediction]
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , *A_ , **A_ ) -> List[Any]:
super().__init__(*A_ , **A_ )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **A_ ) -> int:
__UpperCamelCase ={}
if "threshold" in kwargs:
__UpperCamelCase =kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *A_ , **A_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*A_ , **A_ )
def _a ( self , A_ ) -> int:
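        # Load the image, record its original size, and run the image processor (plus the tokenizer for LayoutLM-style models).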
__UpperCamelCase =load_image(A_ )
__UpperCamelCase =torch.IntTensor([[image.height, image.width]] )
__UpperCamelCase =self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
__UpperCamelCase =self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
__UpperCamelCase =target_size
return inputs
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =model_inputs.pop('target_size' )
__UpperCamelCase =self.model(**A_ )
__UpperCamelCase =outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
__UpperCamelCase =model_inputs['bbox']
return model_outputs
def _a ( self , A_ , A_=0.9 ) -> int:
__UpperCamelCase =model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__UpperCamelCase , __UpperCamelCase =target_size[0].tolist()
def unnormalize(A_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__UpperCamelCase , __UpperCamelCase =model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__UpperCamelCase =[self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__UpperCamelCase =[unnormalize(A_ ) for bbox in model_outputs['bbox'].squeeze(0 )]
__UpperCamelCase =['score', 'label', 'box']
__UpperCamelCase =[dict(zip(A_ , A_ ) ) for vals in zip(scores.tolist() , A_ , A_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__UpperCamelCase =self.image_processor.post_process_object_detection(A_ , A_ , A_ )
__UpperCamelCase =raw_annotations[0]
__UpperCamelCase =raw_annotation['scores']
__UpperCamelCase =raw_annotation['labels']
__UpperCamelCase =raw_annotation['boxes']
__UpperCamelCase =scores.tolist()
__UpperCamelCase =[self.model.config.idalabel[label.item()] for label in labels]
__UpperCamelCase =[self._get_bounding_box(A_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__UpperCamelCase =['score', 'label', 'box']
__UpperCamelCase =[
dict(zip(A_ , A_ ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def _a ( self , A_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =box.int().tolist()
__UpperCamelCase ={
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 682 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
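    # Great-circle distance via the haversine formula, applied to parametric (reduced) latitudes to correct for the Earth's flattening.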
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "xlm-roberta"
def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1E-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Dict:
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =position_embedding_type
__UpperCamelCase =use_cache
__UpperCamelCase =classifier_dropout
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
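    # XNOR truth table: output 1 when both inputs agree (0,0 or 1,1), otherwise 0.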
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
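    # Copy the s3prl downstream projector and post-net linear weights into the HF sequence-classification head.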
__UpperCamelCase =UniSpeechSatForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =downstream_dict['projector.weight']
__UpperCamelCase =downstream_dict['projector.bias']
__UpperCamelCase =downstream_dict['model.post_net.linear.weight']
__UpperCamelCase =downstream_dict['model.post_net.linear.bias']
return model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =UniSpeechSatForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =downstream_dict['model.linear.weight']
__UpperCamelCase =downstream_dict['model.linear.bias']
return model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
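    # Copy the connector, per-layer TDNN kernels, utterance-level linears and the objective weight from the s3prl x-vector head.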
__UpperCamelCase =UniSpeechSatForXVector.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =downstream_dict['connector.weight']
__UpperCamelCase =downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCamelCase =downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
__UpperCamelCase =downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
__UpperCamelCase =downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
__UpperCamelCase =downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
__UpperCamelCase =downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
__UpperCamelCase =downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
__UpperCamelCase =downstream_dict['objective.W']
return model
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
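    # Load the s3prl checkpoint and dispatch to the converter that matches the configured architecture.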
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
__UpperCamelCase =checkpoint['Downstream']
__UpperCamelCase =UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__UpperCamelCase =convert_classification(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif arch.endswith('ForAudioFrameClassification' ):
__UpperCamelCase =convert_diarization(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif arch.endswith('ForXVector' ):
__UpperCamelCase =convert_xvector(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__UpperCamelCase =checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_A = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
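    # Recursive two-end linear search: compare both ends of the current slice, then shrink it inward; returns the index of key, or -1 if absent.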
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
| 682 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
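    # Derive the ConvNeXt backbone depths/hidden sizes from the model-name suffix and attach ADE20k label metadata.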
__UpperCamelCase =3_84
if "tiny" in model_name:
__UpperCamelCase =[3, 3, 9, 3]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
if "small" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
if "base" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[1_28, 2_56, 5_12, 10_24]
__UpperCamelCase =5_12
if "large" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[1_92, 3_84, 7_68, 15_36]
__UpperCamelCase =7_68
if "xlarge" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[2_56, 5_12, 10_24, 20_48]
__UpperCamelCase =10_24
# set label information
__UpperCamelCase =1_50
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase ='ade20k-id2label.json'
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =ConvNextConfig(
depths=SCREAMING_SNAKE_CASE__ , hidden_sizes=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__UpperCamelCase =UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , )
return config
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =[]
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
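    # Pop the old key from the state dict and re-insert its value under the new name.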
__UpperCamelCase =dct.pop(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =val
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase ={
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
__UpperCamelCase =model_name_to_url[model_name]
__UpperCamelCase =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['state_dict']
__UpperCamelCase =get_upernet_config(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "bn" in key:
__UpperCamelCase =key.replace('bn' , 'batch_norm' )
__UpperCamelCase =val
# rename keys
__UpperCamelCase =create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify on image
__UpperCamelCase ='https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' )
__UpperCamelCase =SegformerImageProcessor()
__UpperCamelCase =processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
if model_name == "upernet-convnext-tiny":
__UpperCamelCase =torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCamelCase =torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCamelCase =torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCamelCase =torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCamelCase =torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 682 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
| 682 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
| 682 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
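# sampling hyperparameters for the value-guided planner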
_A = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
_A = 'hopper-medium-v2'
_A = gym.make(env_name)
_A = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
_A = env.reset()
_A = 0
_A = 0
_A = 1000
_A = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_A = pipeline(obs, planning_horizon=32)
# execute action in environment
_A , _A , _A , _A = env.step(denorm_actions)
_A = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_A = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 682 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
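        # build a tiny vocabulary plus matching word-shape and word-pronunciation maps and write them to disk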
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
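        # tokenizing Chinese text should yield matching token, shape and pronunciation ids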
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
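        # prepare_for_model with explicit shape and pronunciation ids should match encode_plus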
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_A = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# save results
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) )
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) )
else:
os.makedirs(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
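    # entropy of a distribution along the last dim; the input is squared first when unlogit is set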
__UpperCamelCase =2
if unlogit:
__UpperCamelCase =torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =p * torch.log(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0
return -plogp.sum(dim=-1 )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
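    # log a 2D tensor as a layer-by-head table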
logger.info('lv, h >\t' + '\t'.join(F'{x + 1}' for x in range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
for row in range(len(SCREAMING_SNAKE_CASE__ ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=False ):
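    # accumulate attention entropy and gradient-based head importance scores over the dataset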
__UpperCamelCase , __UpperCamelCase =model.config.num_hidden_layers, model.config.num_attention_heads
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
if head_mask is None:
__UpperCamelCase =torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE__ )
    # If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
__UpperCamelCase =None
__UpperCamelCase =0.0
__UpperCamelCase =0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__UpperCamelCase =tuple(t.to(args.device ) for t in inputs )
((__UpperCamelCase) , ) =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =entropy(attn.detach() , SCREAMING_SNAKE_CASE__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__UpperCamelCase =2
__UpperCamelCase =torch.pow(torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
__UpperCamelCase =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
logger.info('Head ranked by importance scores' )
__UpperCamelCase =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__UpperCamelCase =torch.arange(
head_importance.numel() , device=args.device )
__UpperCamelCase =head_ranks.view_as(SCREAMING_SNAKE_CASE__ )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
return attn_entropy, head_importance, total_loss
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
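    # iteratively zero out the least important heads until the score drops below the masking threshold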
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , SCREAMING_SNAKE_CASE__ , original_score * args.masking_threshold )
__UpperCamelCase =torch.ones_like(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__UpperCamelCase =original_score
while current_score >= original_score * args.masking_threshold:
__UpperCamelCase =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__UpperCamelCase =float('Inf' )
__UpperCamelCase =head_importance.view(-1 ).sort()[1]
if len(SCREAMING_SNAKE_CASE__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__UpperCamelCase =current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__UpperCamelCase =new_head_mask.view(-1 )
__UpperCamelCase =0.0
__UpperCamelCase =new_head_mask.view_as(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
# Compute metric and head importance again
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss
logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percent)' , SCREAMING_SNAKE_CASE__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
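    # prune the masked heads for real and compare parameter count, score and speed before/after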
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[
v,
]
assert sum(len(SCREAMING_SNAKE_CASE__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , actually_pruned=SCREAMING_SNAKE_CASE__ , )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(SCREAMING_SNAKE_CASE__ , args.output_dir )
def _UpperCAmelCase ( ):
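    # parse CLI arguments, set up (distributed) devices, load GPT-2 and run the head analysis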
__UpperCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=SCREAMING_SNAKE_CASE__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=SCREAMING_SNAKE_CASE__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask heads until a threshold of accuracy is reached.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=SCREAMING_SNAKE_CASE__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
        '--masking_amount' , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=SCREAMING_SNAKE_CASE__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='Batch size.' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__UpperCamelCase =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__UpperCamelCase =torch.device('cuda' , args.local_rank )
__UpperCamelCase =1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__UpperCamelCase =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__UpperCamelCase =nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE__ )
elif args.n_gpu > 1:
__UpperCamelCase =nn.DataParallel(SCREAMING_SNAKE_CASE__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ )
# Prepare dataset
__UpperCamelCase =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__UpperCamelCase =(torch.from_numpy(SCREAMING_SNAKE_CASE__ ),)
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__UpperCamelCase =mask_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
prune_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
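    # generate a nested list of random floats with the requested shape and scale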
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
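        # small feature-extractor configuration shared by the tests in this module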
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
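        # build a batch of float speech inputs, optionally equal-length and/or converted to numpy arrays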
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
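        # integration test: compare the extracted log-mel features against reference values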
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
| 682 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
| 682 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
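        # small hard-coded model hyperparameters to keep the tests fast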
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
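        # run the base model on dict and list inputs and check the hidden-state shape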
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
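        # synthetic image-processing settings used by the tests below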
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
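# Hedged reading aid (illustrative addition, not part of the test above): with
# apply_OCR=True the processor returns parallel per-image lists of words and
# boxes, each box being [x0, y0, x1, y1] on Tesseract's 0-1000 normalized grid,
# so the two lists can be zipped pairwise.
def _print_words_and_boxes(words, boxes):
    for word, box in zip(words[0], boxes[0]):
        print(word, box)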
| 682 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # builtins.open is the innermost patch, so it is injected first (file), then socket.socket (sock)
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    remaining = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(remaining)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
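# Hedged sketch of the function under test (the real implementation lives in
# file_transfer/send_file.py; this mirrors only the behaviour the mocks above
# assert, and the port number is an arbitrary assumption):
def _send_file_sketch(filename: str = 'mytext.txt', testing: bool = False) -> None:
    import socket
    sock = socket.socket()           # socket.socket() called once
    sock.bind(('localhost', 12312))  # bind / listen / accept, once each
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)                  # one receive from the client
    with open(filename, 'rb') as in_file:  # open() entered once
        data = in_file.read(1024)
        while data:                  # read() side_effect yields 1, then None,
            conn.send(data)          # so send() fires exactly once
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()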
| 682 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_A = get_tests_dir('fixtures')
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__UpperCamelCase =mock.Mock()
__UpperCamelCase =500
__UpperCamelCase ={}
__UpperCamelCase =HTTPError
__UpperCamelCase ={}
# Download this model to make sure it's in the cache.
__UpperCamelCase =ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
__UpperCamelCase =ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
            # This check ensures the fake head request was actually called
mock_head.assert_called()
def _a ( self ) -> str:
# This test is for deprecated behavior and can be removed in v5
__UpperCamelCase =ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def _a ( self ) -> Any:
with self.assertRaises(A_ ):
# config is in subfolder, the following should not work without specifying the subfolder
__UpperCamelCase =AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
__UpperCamelCase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor' )
self.assertIsNotNone(A_ )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls ) -> List[str]:
__UpperCamelCase =TOKEN
HfFolder.save_token(A_ )
@classmethod
def _a ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def _a ( self ) -> Optional[int]:
        image_processor = ViTImageProcessor.from_pretrained(_A)
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token )
__UpperCamelCase =ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='test-image-processor' , push_to_hub=A_ , use_auth_token=self._token )
__UpperCamelCase =ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def _a ( self ) -> Dict:
        image_processor = ViTImageProcessor.from_pretrained(_A)
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token )
__UpperCamelCase =ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='valid_org/test-image-processor-org' , push_to_hub=A_ , use_auth_token=self._token )
__UpperCamelCase =ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_ ) )
def _a ( self ) -> Union[str, Any]:
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(_A)
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor' )
| 682 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a real root of ``function`` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        # secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
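# Illustrative extra check (not in the original script): the same routine
# recovers sqrt(2) as the root of g(x) = x**2 - 2.
def g(x: float) -> float:
    return x * x - 2
if __name__ == "__main__":
    print(intersection(g, 1.0, 2.0))  # ~1.4142135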
| 682 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_A = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare an installed library's version (or an explicit Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str):
    """Check the installed torch version (parsed into `_A` above) against a reference."""
    return compare_versions(_A, operation, version)
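if __name__ == "__main__":
    # Hedged demo (assumes torch is installed and that STR_OPERATION_TO_FUNC
    # maps strings such as '>=' to the matching `operator` functions):
    print(is_torch_version('>=', '1.12.0'))
    print(compare_versions('numpy', '>=', '1.20'))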
| 682 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
        self.initialized = False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
            self.retriever = RagRetriever(
                A_, question_encoder_tokenizer=A_, generator_tokenizer=A_, index=A_, init_retrieval=A_, )
            self.initialized = True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(A_, A_)
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
        self.retrieval_workers = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(A_, A_))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(A_, A_)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
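# Hedged wiring sketch (all names below are assumptions; both classes above had
# their real names stripped). Upstream, the first class is a Ray actor helper
# and the second is the distributed retriever that drives it:
#   import ray
#   ray.init()
#   RemoteRetrievalWorker = ray.remote(<worker class above>)
#   workers = [RemoteRetrievalWorker.remote() for _ in range(4)]
#   retriever = <retriever class above>.from_pretrained(
#       'facebook/rag-token-nq', retrieval_workers=workers)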
| 682 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_A = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory(self, directory, identifier=None, n_identifier=None, ignore_files=None, only_modules=True):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str(Path('..') / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =Path('src/transformers' )
__UpperCamelCase ='modeling'
__UpperCamelCase =[
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(A_ , identifier=A_ , ignore_files=A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =Path('src/transformers' )
__UpperCamelCase ='tokenization'
self.analyze_directory(A_ , identifier=A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =Path('src/transformers' )
__UpperCamelCase ='configuration'
self.analyze_directory(A_ , identifier=A_ )
def _a ( self ) -> Any:
__UpperCamelCase =Path('src/transformers' )
__UpperCamelCase =['configuration', 'modeling', 'tokenization']
self.analyze_directory(A_ , n_identifier=A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =Path('docs/source' )
__UpperCamelCase =['favicon.ico']
self.analyze_directory(A_ , ignore_files=A_ , only_modules=A_ )
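# Hedged standalone illustration of the two doctest entry points the helper
# above relies on (module and file names here are placeholders):
#   import doctest, unittest
#   suite = doctest.DocTestSuite(some_imported_module)   # docstring examples
#   result = unittest.TextTestRunner().run(suite)
#   result = doctest.testfile('some_file.py', optionflags=doctest.ELLIPSIS)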
| 682 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
    def create_and_check_model(self, config, pixel_values, labels) -> Optional[Any]:
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels) -> Optional[int]:
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
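# Hedged arithmetic note for the tester above: the hybrid backbone downsamples
# by a factor of 32, so the 64x64 test images yield (64 // 32) ** 2 = 4 patches
# plus one [CLS] token, i.e. the expected sequence length is 5.
assert (64 // 32) ** 2 + 1 == 5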
| 682 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> Optional[int]:
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 682 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(F'Converting {name}...')
    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 1_92:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 2_56:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 3_84:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 2_24, 2_24))
        from_logits = from_model(x)
        our_logits = our_model(x).logits
    assert torch.allclose(from_logits, our_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
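# Example invocation (the script filename is an assumption based on the usual
# converter naming in transformers):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub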
| 682 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = "timesformer"
def __init__( self , A_=224 , A_=16 , A_=3 , A_=8 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-6 , A_=True , A_="divided_space_time" , A_=0 , **A_ , ) -> int:
super().__init__(**A_ )
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =num_frames
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =qkv_bias
__UpperCamelCase =attention_type
__UpperCamelCase =drop_path_rate
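# Hedged usage sketch (upstream this config class is TimesformerConfig; the
# class name was stripped above):
#   config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')
#   config.num_frames   # -> 16
#   config.hidden_size  # -> 768 (default)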
| 682 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer(self, **kwargs) -> List[Any]:
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs) -> Dict:
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
def _a ( self ) -> Tuple:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_A = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
_A = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
_A = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
_A = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
_A = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
return k
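# Hedged walk-through of rename_state_dict_key with DECODER_PATTERNS (the TF
# key below is illustrative):
#   'pegasus/decoder/layer_0/attention/self/query/kernel'
#   '/'->'.', 'layer_'->'layers.', 'kernel'->'weight', 'pegasus'->'model',
#   'attention.self'->'self_attn', 'query'->'q_proj'
#   => 'model.decoder.layers.0.self_attn.q_proj.weight'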
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight')
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy(path):
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_A = parser.parse_args()
_A = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 682 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command)
return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
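# Example invocations (`accelerate test` dispatches into the parser above):
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml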
| 682 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : UNetaDModel
UpperCAmelCase__ : ScoreSdeVeScheduler
def __init__( self , A_ , A_ ) -> Tuple:
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=2000, generator=None, output_type='pil', return_dict=True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
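# Hedged usage sketch (the checkpoint id is an assumption; any score-SDE-VE
# checkpoint with a matching UNet and scheduler should work):
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256')
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save('sde_ve_sample.png')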
| 682 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
    DECODER_CONVERSION_MAPPING = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
            new_key = '.'.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36, d_ff=39_68, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=15_36, d_ff=39_68, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = PixaStructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 40_96
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}')
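if __name__ == "__main__":
    # Quick hedged demo of the lookup helper above.
    act = get_tf_activation('gelu')
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())  # ~[-0.1587, 0., 0.8413]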
| 682 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.json'}
_A = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_A = {'mgp-str': 27}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A_ , A_="[GO]" , A_="[GO]" , A_="[s]" , A_="[GO]" , **A_ ) -> int:
super().__init__(
unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
def _a ( self ) -> List[Any]:
return len(self.vocab )
def _a ( self ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize(self, text) -> Dict:
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
def _a ( self , A_ ) -> Any:
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def _a ( self , A_ ) -> Union[str, Any]:
return self.decoder.get(A_ )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
return (vocab_file,)
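# Hedged usage sketch (upstream this class is MgpstrTokenizer; tokenization is
# character-level, one vocab id per character, with '[GO]' for unknowns):
#   tokenizer = MgpstrTokenizer(vocab_file='vocab.json')
#   tokenizer._tokenize('hello')   # -> ['h', 'e', 'l', 'l', 'o']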
| 682 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 682 | 1 |
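# Returns the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k),
# generated with the classic three-pointer merge over the list built so far.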
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =int(SCREAMING_SNAKE_CASE__ )
if n_element < 1:
        __UpperCamelCase =ValueError('n_element should be a positive number' )
raise my_error
__UpperCamelCase =[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =(0, 0, 0)
__UpperCamelCase =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_A = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_A = hamming(int(n))
print('-----------------------------------------------------')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 682 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
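    # Returns the angle between this vector and the other vector, in degrees when deg is True.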
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
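# Returns a zero vector of the given dimension.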
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
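# Returns the unit basis vector of the given dimension, with a 1 at the given index and 0 elsewhere.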
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
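# Computes the axpy operation: returns scalar * x + y for vectors x and y.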
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
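# Returns a vector of the given size whose components are random integers in the given range, seeded for reproducibility.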
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
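    # Returns the determinant of the minor obtained by deleting row x and column y.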
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
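    # Returns the signed cofactor: (-1) ** (x + y) times the minor at (x, y).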
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
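    # Computes the determinant by Laplace expansion along the first row.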
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
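# Returns an n x n matrix filled with zeros.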
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
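# Returns a matrix of the given width and height whose entries are random integers in the given range, seeded for reproducibility.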
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 682 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=2 , A_=3 , A_=4 , A_=2 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=36 , A_=3 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=6 , A_=6 , A_=3 , A_=4 , A_=None , A_=1000 , ) -> Optional[int]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =text_seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =coordinate_size
__UpperCamelCase =shape_size
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
__UpperCamelCase =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCamelCase =text_seq_length
__UpperCamelCase =(image_size // patch_size) ** 2 + 1
__UpperCamelCase =self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCamelCase =bbox[i, j, 3]
__UpperCamelCase =bbox[i, j, 1]
__UpperCamelCase =t
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCamelCase =bbox[i, j, 2]
__UpperCamelCase =bbox[i, j, 0]
__UpperCamelCase =t
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCamelCase =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =LayoutLMvaModel(config=A_ )
model.to(A_ )
model.eval()
# text + image
__UpperCamelCase =model(A_ , pixel_values=A_ )
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ , bbox=A_ , pixel_values=A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ , bbox=A_ , pixel_values=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__UpperCamelCase =model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__UpperCamelCase =model(pixel_values=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =LayoutLMvaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =LayoutLMvaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =LayoutLMvaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> int:
__UpperCamelCase =self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> List[Any]:
__UpperCamelCase =LayoutLMvaModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self , A_ , A_ , A_=False ) -> Optional[int]:
__UpperCamelCase =copy.deepcopy(A_ )
if model_class in get_values(A_ ):
__UpperCamelCase ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(A_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A_ ):
__UpperCamelCase =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=A_ )
elif model_class in get_values(A_ ):
__UpperCamelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
__UpperCamelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
elif model_class in [
*get_values(A_ ),
]:
__UpperCamelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
elif model_class in [
*get_values(A_ ),
]:
__UpperCamelCase =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=A_ , )
return inputs_dict
def _a ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase =type
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
@slow
def _a ( self ) -> Optional[Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =LayoutLMvaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return LayoutLMvaImageProcessor(apply_ocr=A_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Dict:
__UpperCamelCase =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
__UpperCamelCase =torch.tensor([[1, 2]] )
__UpperCamelCase =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__UpperCamelCase =model(
input_ids=input_ids.to(A_ ) , bbox=bbox.to(A_ ) , pixel_values=pixel_values.to(A_ ) , )
# verify the logits
__UpperCamelCase =torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , A_ )
__UpperCamelCase =torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , A_ , atol=1E-4 ) )
| 682 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
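# Loads a SentencePiece processor from the given model file with the given keyword arguments.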
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
| 682 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
# A local function to see if a dot lands in the circle.
def is_in_circle(SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> bool:
__UpperCamelCase =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__UpperCamelCase =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE__ ) )
# The ratio of the area for circle to square is pi/4.
__UpperCamelCase =proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The math module value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
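# Monte Carlo estimate of the integral of function_to_integrate over [min_value, max_value].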
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Callable[[float], float] , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , ):
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) * (max_value - min_value)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ):
def identity_function(SCREAMING_SNAKE_CASE__ : float ) -> float:
return x
__UpperCamelCase =area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =(max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
def function_to_integrate(SCREAMING_SNAKE_CASE__ : float ) -> float:
return sqrt(4.0 - x * x )
__UpperCamelCase =area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
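# Renumbers the block index in a checkpoint key by the given offset and swaps the original parameter name for the new one.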
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
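# Maps every key of the original PoolFormer state dict onto the HuggingFace naming scheme.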
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
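        # The layer_scale keys keep their names; only the block index is re-offset.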
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 682 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_A = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_A = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_A = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
def remove_articles(SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(SCREAMING_SNAKE_CASE__ , ' ' , SCREAMING_SNAKE_CASE__ )
def white_space_fix(SCREAMING_SNAKE_CASE__ : Optional[int] ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE__ ) ) ) )
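# Exact-match comparison between a normalized prediction and a normalized reference.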
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
return int(normalize_answer(SCREAMING_SNAKE_CASE__ ) == normalize_answer(SCREAMING_SNAKE_CASE__ ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[any(compute_exact(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
return (sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )) * 1_00
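# Computes the keep / delete / add sub-scores of SARI for a single n-gram order.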
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =[rgram for rgrams in rgramslist for rgram in rgrams]
__UpperCamelCase =Counter(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =Counter(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =Counter()
for sgram, scount in sgramcounter.items():
__UpperCamelCase =scount * numref
__UpperCamelCase =Counter(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =Counter()
for cgram, ccount in cgramcounter.items():
__UpperCamelCase =ccount * numref
# KEEP
__UpperCamelCase =sgramcounter_rep & cgramcounter_rep
__UpperCamelCase =keepgramcounter_rep & rgramcounter
__UpperCamelCase =sgramcounter_rep & rgramcounter
__UpperCamelCase =0
__UpperCamelCase =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCamelCase =1
__UpperCamelCase =1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__UpperCamelCase =keeptmpscorea / len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__UpperCamelCase =keeptmpscorea / sum(keepgramcounterall_rep.values() )
__UpperCamelCase =0
if keepscore_precision > 0 or keepscore_recall > 0:
__UpperCamelCase =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__UpperCamelCase =sgramcounter_rep - cgramcounter_rep
__UpperCamelCase =delgramcounter_rep - rgramcounter
__UpperCamelCase =sgramcounter_rep - rgramcounter
__UpperCamelCase =0
__UpperCamelCase =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCamelCase =1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__UpperCamelCase =deltmpscorea / len(SCREAMING_SNAKE_CASE__ )
# ADDITION
__UpperCamelCase =set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =set(SCREAMING_SNAKE_CASE__ ) & set(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__UpperCamelCase =1
__UpperCamelCase =1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__UpperCamelCase =addtmpscore / len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__UpperCamelCase =addtmpscore / len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0
if addscore_precision > 0 or addscore_recall > 0:
__UpperCamelCase =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
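# Computes the sentence-level SARI score by averaging the 1- to 4-gram keep, delete and add sub-scores.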
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =ssent.split(' ' )
__UpperCamelCase =csent.split(' ' )
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
for rsent in rsents:
__UpperCamelCase =rsent.split(' ' )
__UpperCamelCase =[]
__UpperCamelCase =[]
__UpperCamelCase =[]
ragramslist.append(SCREAMING_SNAKE_CASE__ )
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
__UpperCamelCase =ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
__UpperCamelCase =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
__UpperCamelCase =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
__UpperCamelCase =sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
__UpperCamelCase =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
__UpperCamelCase =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(SCREAMING_SNAKE_CASE__ )
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
__UpperCamelCase =cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
__UpperCamelCase =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
__UpperCamelCase =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(SCREAMING_SNAKE_CASE__ )
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) =SARIngram(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) =SARIngram(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) =SARIngram(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) =SARIngram(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__UpperCamelCase =sum([delascore, delascore, delascore, delascore] ) / 4
__UpperCamelCase =sum([addascore, addascore, addascore, addascore] ) / 4
__UpperCamelCase =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "13a" , SCREAMING_SNAKE_CASE__ : bool = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using spaces
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
__UpperCamelCase =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
__UpperCamelCase =sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE__ )()(SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE__ )
elif tokenizer == "moses":
__UpperCamelCase =sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE__ , return_str=SCREAMING_SNAKE_CASE__ , escape=SCREAMING_SNAKE_CASE__ )
elif tokenizer == "penn":
__UpperCamelCase =sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE__ , return_str=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =sentence
if not return_str:
__UpperCamelCase =normalized_sent.split()
return normalized_sent
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
if not (len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )):
raise ValueError('Sources length must match predictions and references lengths.' )
__UpperCamelCase =0
for src, pred, refs in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE__ ) , normalize(SCREAMING_SNAKE_CASE__ ) , [normalize(SCREAMING_SNAKE_CASE__ ) for sent in refs] )
__UpperCamelCase =sari_score / len(SCREAMING_SNAKE_CASE__ )
return 1_00 * sari_score
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any="exp" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False , ):
__UpperCamelCase =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__UpperCamelCase =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__UpperCamelCase =sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , smooth_method=SCREAMING_SNAKE_CASE__ , smooth_value=SCREAMING_SNAKE_CASE__ , force=SCREAMING_SNAKE_CASE__ , lowercase=SCREAMING_SNAKE_CASE__ , use_effective_order=SCREAMING_SNAKE_CASE__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def _a ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _a ( self , A_ , A_ , A_ ) -> str:
__UpperCamelCase ={}
result.update({'sari': compute_sari(sources=A_ , predictions=A_ , references=A_ )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=A_ , references=A_ )} )
result.update({'exact': compute_em(predictions=A_ , references=A_ )} )
return result
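# --- illustrative sketch (not part of the metric above) ----------------------
# The keep/delete/add scores above each combine a precision and a recall with
# a plain F1, after defining 0/0 = 1 so that an exact match scores 1.0. A
# self-contained restatement of that combination rule (names are hypothetical):
def _f1_with_exact_match_convention(precision: float, recall: float) -> float:
    # Upstream, 0/0 is defined as 1, so empty n-gram sets yield p = r = 1.0;
    # here only the p = r = 0 case needs a guard.
    if precision == 0 and recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)


assert _f1_with_exact_match_convention(1.0, 1.0) == 1.0  # exact match
assert _f1_with_exact_match_convention(0.0, 0.0) == 0.0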
| 682 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 6_378_137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
    # Haversine formula: half-angle sines of the latitude/longitude differences
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
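# --- illustrative sketch (not part of the function above) --------------------
# The function above is a haversine distance with the latitudes first corrected
# for the ellipsoid via atan((1 - flattening) * tan(phi)). For comparison, a
# plain spherical haversine (reusing the math imports at the top of this file;
# the coordinates in the comment below are illustrative):
def _spherical_haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    r = 6_378_137.0  # equatorial radius in metres, as above
    phi_a, phi_b = radians(lat1), radians(lat2)
    half_d_phi = radians(lat2 - lat1) / 2
    half_d_lambda = radians(lon2 - lon1) / 2
    haversine = sin(half_d_phi) ** 2 + cos(phi_a) * cos(phi_b) * sin(half_d_lambda) ** 2
    return 2 * r * asin(sqrt(haversine))


# _spherical_haversine(37.77, -122.42, 40.71, -74.01) is roughly 4.13e6 metres
# (San Francisco to New York).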
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
if n == 1 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return 0
elif n == 2:
return 1
else:
__UpperCamelCase =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =0
__UpperCamelCase =2
while digits < n:
index += 1
__UpperCamelCase =len(str(fibonacci(SCREAMING_SNAKE_CASE__ ) ) )
return index
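# --- illustrative sketch (not part of the functions above) -------------------
# Worked example of the indexing used above: with sequence = [0, 1] and the
# returned value sequence[n], the first Fibonacci number with 3 digits is
# sequence[12] = 144, so the index-finding function returns 12 for n = 3.
_seq = [0, 1]
for _i in range(2, 13):
    _seq.append(_seq[_i - 1] + _seq[_i - 2])
assert _seq[12] == 144 and len(str(_seq[12])) == 3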
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10_00 ):
return fibonacci_digits_index(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 1 |
import numpy as np
from transformers import Pipeline
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =np.max(SCREAMING_SNAKE_CASE__ , axis=-1 , keepdims=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE__ )
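# --- illustrative sketch (not part of the pipeline below) --------------------
# The helper above is the standard numerically stable softmax: subtracting the
# per-row maximum before exp() prevents overflow without changing the result.
# A self-contained check (numpy is already imported at the top of this file):
def _stable_softmax(x):
    shifted = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return shifted / shifted.sum(axis=-1, keepdims=True)


assert np.allclose(_stable_softmax(np.array([[1000.0, 1000.0]])), [[0.5, 0.5]])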
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , **A_ ) -> Union[str, Any]:
__UpperCamelCase ={}
if "second_text" in kwargs:
__UpperCamelCase =kwargs['second_text']
return preprocess_kwargs, {}, {}
def _a ( self , A_ , A_=None ) -> Dict:
return self.tokenizer(A_ , text_pair=A_ , return_tensors=self.framework )
def _a ( self , A_ ) -> Any:
return self.model(**A_ )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =model_outputs.logits[0].numpy()
__UpperCamelCase =softmax(A_ )
__UpperCamelCase =np.argmax(A_ )
__UpperCamelCase =self.model.config.idalabel[best_class]
__UpperCamelCase =probabilities[best_class].item()
__UpperCamelCase =logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
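# --- illustrative sketch (not part of the function above) --------------------
# A self-contained restatement of the two-ended linear search above, to make
# the recursion pattern explicit (the function name here is hypothetical):
def _two_ended_search(data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(data) - 1
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return _two_ended_search(data, key, left + 1, right - 1)


assert _two_ended_search([1, 4, 7, 9], 7) == 2
assert _two_ended_search([1, 4, 7, 9], 5) == -1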
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =1.5
__UpperCamelCase =int(factor * num_class_images )
__UpperCamelCase =ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=SCREAMING_SNAKE_CASE__ , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=SCREAMING_SNAKE_CASE__ )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
__UpperCamelCase =client.query(text=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__UpperCamelCase =int(factor * num_images )
__UpperCamelCase =ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=SCREAMING_SNAKE_CASE__ , aesthetic_weight=0.1 , )
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =tqdm(desc='downloading real regularization images' , total=SCREAMING_SNAKE_CASE__ )
with open(F'{class_data_dir}/caption.txt' , 'w' ) as fa, open(F'{class_data_dir}/urls.txt' , 'w' ) as fa, open(
F'{class_data_dir}/images.txt' , 'w' ) as fa:
while total < num_class_images:
__UpperCamelCase =class_images[count]
count += 1
try:
__UpperCamelCase =requests.get(images['url'] )
if img.status_code == 2_00:
__UpperCamelCase =Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F'{class_data_dir}/images/{total}.jpg' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser('' , add_help=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=SCREAMING_SNAKE_CASE__ )
return parser.parse_args()
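# --- illustrative sketch (not part of the script above) ----------------------
# A typical invocation; the script name and values here are illustrative:
#
#   python retrieve.py \
#       --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_data/dog \
#       --num_class_images 200
#
# The script queries the LAION-400M index via clip-retrieval, then downloads
# images into {class_data_dir}/images and writes captions and URLs alongside.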
if __name__ == "__main__":
_A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 682 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
        # with apply_ocr = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
        # with apply_ocr = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F'{test_file} instead.' )
__UpperCamelCase =components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
__UpperCamelCase =components[:-1] + [test_fn.replace('.py' , '' )]
__UpperCamelCase ='.'.join(SCREAMING_SNAKE_CASE__ )
return test_module_path
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =get_module_path(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =importlib.import_module(SCREAMING_SNAKE_CASE__ )
return test_module
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =[]
__UpperCamelCase =get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =[]
__UpperCamelCase =get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test modules. Let's exclude them by
        # checking that `all_model_classes` is not empty (which also excludes other special classes).
__UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , 'all_model_classes' , [] )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =get_test_classes(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =test_class()
if hasattr(SCREAMING_SNAKE_CASE__ , 'setUp' ):
test.setUp()
__UpperCamelCase =None
if hasattr(SCREAMING_SNAKE_CASE__ , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute defaulting to `None`. Let's skip this case.
if test.model_tester is not None:
__UpperCamelCase =test.model_tester.__class__
return model_tester
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =get_test_classes(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
for test_class in test_classes:
__UpperCamelCase =get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =get_test_classes(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ ) for test_class in test_classes}
return test_tester_mapping
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =get_model_classes(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_test_mapping
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =get_model_classes(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_to_tester_mapping
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE__ ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return {to_json(SCREAMING_SNAKE_CASE__ ): to_json(SCREAMING_SNAKE_CASE__ ) for k, v in o.items()}
else:
return o
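# --- illustrative sketch (not part of the helpers above) ---------------------
# Typical use of the helpers above, run from the repository root; the test file
# path is illustrative and the function names follow the call sites above:
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   get_test_classes(test_file)             # e.g. [BertModelTest, ...]
#   get_model_to_tester_mapping(test_file)  # e.g. {BertModel: [BertModelTester], ...}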
| 682 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
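# --- illustrative sketch (not part of the dataclass above) -------------------
# The dataclass above extends TrainingArguments with generation-time options;
# the field names follow its metadata. A typical instantiation (the class name
# and values here are assumptions):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()  # GenerationConfig values are serialized recursively (see above)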
| 682 | 1 |
import heapq
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict ):
__UpperCamelCase =[]
    # for each node and its adjacency list, add the node and its rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is pushed to simulate a max heap
for key, value in graph.items():
# O(log(n))
heapq.heappush(SCREAMING_SNAKE_CASE__ , [-1 * len(SCREAMING_SNAKE_CASE__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
__UpperCamelCase =set()
    # while the queue isn't empty and there are still uncovered edges
    # (queue[0][0] is the negated degree of the highest-degree remaining node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__UpperCamelCase =heapq.heappop(SCREAMING_SNAKE_CASE__ )[1][0]
chosen_vertices.add(SCREAMING_SNAKE_CASE__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
__UpperCamelCase =elem[1][1].index(SCREAMING_SNAKE_CASE__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(SCREAMING_SNAKE_CASE__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 682 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
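# --- illustrative sketch (not part of the configs above) ---------------------
# Composing the configs above; the class and method names are inferred from the
# model_type strings "blip_text_model", "blip_vision_model" and "blip" and from
# the Transformers API, so treat them as assumptions:
#
#   text_cfg = BlipTextConfig(vocab_size=30524)
#   vision_cfg = BlipVisionConfig(image_size=384)
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()  # nested text/vision configs are serialized recursively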
| 682 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "vivit"
def __init__( self , A_=224 , A_=32 , A_=[2, 16, 16] , A_=3 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu_fast" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-06 , A_=True , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =num_frames
__UpperCamelCase =tubelet_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
super().__init__(**A_ )
| 682 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : bytes ):
return "".join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE__ )] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE__ ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE__ ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 2 ) )
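# --- illustrative sketch (not part of the functions above) -------------------
# Round-trip behaviour of the base16 pair above, restated self-contained for
# demonstration:
def _b16_encode(data: bytes) -> str:
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)


def _b16_decode(data: str) -> bytes:
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))


assert _b16_encode(b"Hello") == "48656C6C6F"
assert _b16_decode("48656C6C6F") == b"Hello"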
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _load_datasamples ( self , num_samples ) -> Optional[int]:
ds =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
speech_samples =ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
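# A minimal usage sketch of the behaviour asserted above (hedged: it assumes the
# `transformers` and `numpy` packages are installed; the 80 mel bins and 3000-frame
# padded length come from the shape assertion earlier in this file, not new claims):
import numpy as np
from transformers import WhisperFeatureExtractor
sketch_extractor = WhisperFeatureExtractor()
sketch_audio = np.zeros(16_000, dtype=np.float32) # one second of silence at 16 kHz
sketch_features = sketch_extractor(sketch_audio, sampling_rate=16_000, return_tensors='np').input_features
assert sketch_features.shape == (1, 80, 3000) # padded to 30 s of log-mel frames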
| 682 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
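# Sketch of what the lazy module above buys a downstream user (hedged: it assumes
# this file lives at transformers/models/ibert/__init__.py, which the relative
# imports suggest; the access pattern itself is standard _LazyModule behaviour):
#
# from transformers.models import ibert # cheap: torch is not imported yet
# config = ibert.IBertConfig() # first attribute access triggers the real import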
| 682 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
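# Hedged sketch of producing the hard-coded ids above with a tokenizer instead (the
# checkpoint name is the one already loaded above; that a matching slow tokenizer is
# published for it is an assumption, not something this test verifies):
#
# from transformers import FlaubertTokenizer
# tokenizer = FlaubertTokenizer.from_pretrained('jplu/tf-flaubert-small-cased')
# input_ids = tokenizer("J'aime flaubert !", return_tensors='tf').input_ids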
| 682 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = "mra"
def __init__( self , A_=50265 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=1 , A_=0.02 , A_=1E-5 , A_="absolute" , A_=4 , A_="full" , A_=0 , A_=0 , A_=1 , A_=0 , A_=2 , **A_ , ) -> Optional[Any]:
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
__UpperCamelCase =vocab_size
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =type_vocab_size
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =position_embedding_type
__UpperCamelCase =block_per_row
__UpperCamelCase =approx_mode
__UpperCamelCase =initial_prior_first_n_blocks
__UpperCamelCase =initial_prior_diagonal_n_blocks
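# Minimal usage sketch for the config class above (upstream it is published as
# `MraConfig`; the field names come straight from the __init__ signature above):
#
# config = MraConfig(vocab_size=50265, hidden_size=768, block_per_row=4)
# config.save_pretrained('./mra-config') # standard PretrainedConfig round-trip
# config = MraConfig.from_pretrained('./mra-config')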
| 682 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
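# Note on mock ordering below: stacked @patch decorators apply bottom-up, so the
# patch closest to the function ('builtins.open') is injected as the first mock
# argument and 'socket.socket' as the second.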
@patch('socket.socket' )
@patch('builtins.open' )
def _UpperCAmelCase ( file , sock ):
# ===== initialization =====
conn =Mock()
sock.return_value.accept.return_value =conn, Mock()
data_iter =iter([1, None] )
file.return_value.__enter__.return_value.read.side_effect =lambda _ : next(data_iter )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 682 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =mask_ratio
__UpperCamelCase =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__UpperCamelCase =(image_size // patch_size) ** 2
__UpperCamelCase =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
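# Worked example with this tester's defaults: image_size=30 and patch_size=2 give
# (30 // 2) ** 2 = 225 patches, and with mask_ratio=0.6 the visible sequence is
# ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens.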
def _a ( self ) -> int:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _a ( self , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =ViTMAEModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> int:
__UpperCamelCase =ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
__UpperCamelCase =(self.image_size // self.patch_size) ** 2
__UpperCamelCase =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
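# e.g. with this tester's defaults: (30 // 2) ** 2 = 225 patches, and each patch
# flattens to patch_size**2 * num_channels = 2 * 2 * 3 = 12 reconstruction targets.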
# test greyscale images
__UpperCamelCase =1
__UpperCamelCase =ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase =model(A_ )
__UpperCamelCase =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _a ( self ) -> Any:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCAmelCase__ : List[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =ViTMAEModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> str:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def _a ( self , A_ , A_ , A_ ) -> str:
# make masks reproducible
np.random.seed(2 )
__UpperCamelCase =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__UpperCamelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__UpperCamelCase =torch.from_numpy(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCamelCase =pt_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
model.to(A_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(A_ , A_ ) )
__UpperCamelCase =outputs[0].cpu().numpy()
__UpperCamelCase =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__UpperCamelCase =model_class.from_pretrained(A_ )
model.to(A_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(A_ , A_ ) )
# Make sure we don't have nans
__UpperCamelCase =after_outputs[0].cpu().numpy()
__UpperCamelCase =0
__UpperCamelCase =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Dict:
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Union[str, Any]:
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _a ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self ) -> str:
pass
@slow
def _a ( self ) -> Any:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTMAEModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img ( ):
image =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def _a ( self ) -> Any:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__UpperCamelCase =ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__UpperCamelCase =ViTMAEConfig()
__UpperCamelCase =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__UpperCamelCase =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ , noise=torch.from_numpy(A_ ).to(device=A_ ) )
# verify the logits
__UpperCamelCase =torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(A_ ) , atol=1E-4 ) )
| 682 |
import math
from collections.abc import Callable
def intersection ( function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
# secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
x_n =x0
x_n1 =x1
while True:
if x_n == x_n1 or function(x_n1 ) == function(x_n ):
raise ZeroDivisionError('float division by zero, could not find root' )
x_n2 =x_n1 - (
function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
)
if abs(x_n2 - x_n1 ) < 10**-5:
return x_n2
x_n =x_n1
x_n1 =x_n2
def f ( x : float ) -> float:
return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
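# Worked check: for f(x) = x**3 - 2*x - 5 the real root is near 2.0945514815, so the
# call above converges to ~2.09455 within the 1e-5 tolerance in a few iterations.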
| 682 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_A = logging.get_logger(__name__)
# General docstring
_A = 'RegNetConfig'
# Base docstring
_A = 'facebook/regnet-y-040'
_A = [1, 1088, 7, 7]
# Image classification docstring
_A = 'facebook/regnet-y-040'
_A = 'tabby, tabby cat'
_A = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , ) -> Dict:
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase =tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
__UpperCamelCase =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
__UpperCamelCase =ACTaFN[activation] if activation is not None else tf.identity
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =self.convolution(self.padding(A_ ) )
__UpperCamelCase =self.normalization(A_ )
__UpperCamelCase =self.activation(A_ )
return hidden_state
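# Why the explicit padding above: symmetric ZeroPadding2D(kernel_size // 2) followed
# by a VALID conv reproduces PyTorch's Conv2d(padding=kernel_size // 2), whereas TF's
# padding='SAME' can pad asymmetrically at stride > 1 and so drift from the PyTorch
# reference weights this port mirrors.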
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , **A_ ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =config.num_channels
__UpperCamelCase =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def _a ( self , A_ ) -> Any:
__UpperCamelCase =shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase =tf.transpose(A_ , perm=(0, 2, 3, 1) )
__UpperCamelCase =self.embedder(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ = 2 , **A_ ) -> int:
super().__init__(**A_ )
__UpperCamelCase =tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
__UpperCamelCase =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def _a ( self , A_ , A_ = False ) -> tf.Tensor:
return self.normalization(self.convolution(A_ ) , training=A_ )
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , **A_ ) -> Dict:
super().__init__(**A_ )
__UpperCamelCase =tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
__UpperCamelCase =[
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def _a ( self , A_ ) -> Optional[int]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase =self.pooler(A_ )
for layer_module in self.attention:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =hidden_state * pooled
return hidden_state
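# The layer above is a standard squeeze-and-excitation block: global-average-pool to
# (batch, 1, 1, channels), bottleneck through two 1x1 convs (ReLU then sigmoid), then
# rescale the input channel-wise with the resulting attention weights.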
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> int:
super().__init__(**A_ )
__UpperCamelCase =in_channels != out_channels or stride != 1
__UpperCamelCase =max(1 , out_channels // config.groups_width )
__UpperCamelCase =(
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase =[
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
__UpperCamelCase =ACTaFN[config.hidden_act]
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =hidden_state
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =self.shortcut(A_ )
hidden_state += residual
__UpperCamelCase =self.activation(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =in_channels != out_channels or stride != 1
__UpperCamelCase =max(1 , out_channels // config.groups_width )
__UpperCamelCase =(
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
__UpperCamelCase =[
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
__UpperCamelCase =ACTaFN[config.hidden_act]
def _a ( self , A_ ) -> int:
__UpperCamelCase =hidden_state
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =self.shortcut(A_ )
hidden_state += residual
__UpperCamelCase =self.activation(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ ) -> Dict:
super().__init__(**A_ )
__UpperCamelCase =TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__UpperCamelCase =[
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _a ( self , A_ ) -> Optional[Any]:
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , **A_ ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
__UpperCamelCase =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=f'stages.{i+1}' ) )
def _a ( self , A_ , A_ = False , A_ = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase =hidden_states + (hidden_state,)
__UpperCamelCase =stage_module(A_ )
if output_hidden_states:
__UpperCamelCase =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = RegNetConfig
def __init__( self , A_ , **A_ ) -> Optional[int]:
super().__init__(**A_ )
__UpperCamelCase =config
__UpperCamelCase =TFRegNetEmbeddings(A_ , name='embedder' )
__UpperCamelCase =TFRegNetEncoder(A_ , name='encoder' )
__UpperCamelCase =tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def _a ( self , A_ , A_ = None , A_ = None , A_ = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.embedder(A_ , training=A_ )
__UpperCamelCase =self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
__UpperCamelCase =encoder_outputs[0]
__UpperCamelCase =self.pooler(A_ )
# Change to NCHW output format to have uniformity in the modules
__UpperCamelCase =tf.transpose(A_ , perm=(0, 3, 1, 2) )
__UpperCamelCase =tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase =tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = RegNetConfig
UpperCAmelCase__ : Optional[int] = "regnet"
UpperCAmelCase__ : Tuple = "pixel_values"
@property
def _a ( self ) -> int:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_A = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_A = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , A_ , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , *A_ , **A_ ) -> str:
super().__init__(A_ , *A_ , **A_ )
__UpperCamelCase =TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , A_ , A_ = None , A_ = None , A_=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A_ , )
class UpperCAmelCase__ ( A_ , A_ ):
"""simple docstring"""
def __init__( self , A_ , *A_ , **A_ ) -> Optional[Any]:
super().__init__(A_ , *A_ , **A_ )
__UpperCamelCase =config.num_labels
__UpperCamelCase =TFRegNetMainLayer(A_ , name='regnet' )
# classification head
__UpperCamelCase =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
__UpperCamelCase =outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase =self.classifier[0](A_ )
__UpperCamelCase =self.classifier[1](A_ )
__UpperCamelCase =None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
__UpperCamelCase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
| 682 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
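# Hedged sketch of wiring the retriever above into Ray (upstream the first class in
# this file is `RayRetriever` and this one is `RagRayDistributedRetriever`; the actor
# creation below is an assumption about caller code, not part of this module):
#
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
# retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-base', workers)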
| 682 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self , A_ ) -> int:
__UpperCamelCase =3
__UpperCamelCase =250
__UpperCamelCase =ids_tensor((batch_size, length) , A_ )
__UpperCamelCase =torch.ones((batch_size, length) , device=A_ , dtype=torch.float ) / length
return input_ids, scores
def _a ( self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase =self._get_tensors(5 )
__UpperCamelCase =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =MaxLengthCriteria(max_length=10 )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(5 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(5 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase , __UpperCamelCase =self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
__UpperCamelCase =StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =self._get_tensors(5 )
__UpperCamelCase =MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(A_ , A_ ) )
__UpperCamelCase =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(A_ , A_ ) )
def _a ( self ) -> Union[str, Any]:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(A_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCamelCase =validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(A_ ) , 1 )
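# Minimal usage sketch of the criteria exercised above (all names are imported at the
# top of this file; `input_ids` and `scores` stand for the tensors a generate loop
# would pass in):
#
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=1.0)])
# should_stop = criteria(input_ids, scores) # True once any criterion is met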
| 682 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
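# e.g. with this tester's image_size=64: the stride-32 backbone feature map is 2x2,
# so (64 // 32) ** 2 = 4 patches and a sequence length of 4 + 1 = 5 with the [CLS]
# token added.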
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img ( ):
image =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def replace_key_with_offset ( key , offset , original_name , new_name ):
to_find =original_name.split('.' )[0]
key_list =key.split('.' )
orig_block_num =int(key_list[key_list.index(to_find ) - 2] )
layer_num =int(key_list[key_list.index(to_find ) - 1] )
new_block_num =orig_block_num - offset
key =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
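# Worked example (hypothetical key, for illustration only):
# replace_key_with_offset('poolformer.encoder.1.0.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1')
# -> 'poolformer.encoder.block.0.0.output.conv1.weight'
# (block index 1 is shifted down by the offset of 1 and the submodule is renamed).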
def rename_keys ( state_dict ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def prepare_img ( ):
url ='http://images.cocodataset.org/val2017/000000039769.jpg'
image =Image.open(requests.get(url , stream=True ).raw )
return image
@torch.no_grad()
def convert_poolformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
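# Hypothetical invocation (the script name and paths are placeholders, not taken from the
# repository):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12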
| 682 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original ones."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
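# Hypothetical invocation (the script name is a placeholder): convert and push a single
# variant with
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --push_to_hub
# Omitting --model_name converts every entry in names_to_config.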
| 682 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_A = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
_A = {'allegro/herbert-base-cased': 514}
_A = {}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = HerbertTokenizer
def __init__( self , A_=None , A_=None , A_=None , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_="</s>" , **A_ , ) -> Optional[int]:
super().__init__(
A_ , A_ , tokenizer_file=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , sep_token=A_ , **A_ , )
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
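# With the defaults declared in __init__ above, the resulting layouts are
# `<s> A </s>` for a single sequence and `<s> A </s> B </s>` for a pair.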
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
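# i.e. segment ids are 0 over `<s> A </s>` and 1 over the trailing `B </s>` when a pair is given.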
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
| 682 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
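# These tests can be run in isolation with pytest (the path is an assumption about where
# the file lives in the repository):
#   pytest tests/models/clap/test_processor_clap.py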
| 682 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase ={
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
__UpperCamelCase =bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__UpperCamelCase =BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=SCREAMING_SNAKE_CASE__ , output_all_encodings=SCREAMING_SNAKE_CASE__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , SCREAMING_SNAKE_CASE__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__UpperCamelCase ='openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__UpperCamelCase =os.path.join(get_home_dir() , 'models' )
__UpperCamelCase =_load_vocab(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =nlp.model.BERTModel(
SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=SCREAMING_SNAKE_CASE__ , use_token_type_embed=SCREAMING_SNAKE_CASE__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=SCREAMING_SNAKE_CASE__ , use_decoder=SCREAMING_SNAKE_CASE__ , )
original_bort.load_parameters(SCREAMING_SNAKE_CASE__ , cast_dtype=SCREAMING_SNAKE_CASE__ , ignore_extra=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =original_bort._collect_params_with_prefix()
# Build our config 🤗
__UpperCamelCase ={
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(SCREAMING_SNAKE_CASE__ ),
}
__UpperCamelCase =BertConfig.from_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BertForMaskedLM(SCREAMING_SNAKE_CASE__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(SCREAMING_SNAKE_CASE__ : Tuple ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =hf_param.shape
__UpperCamelCase =to_torch(params[gluon_param] )
__UpperCamelCase =gluon_param.shape
assert (
shape_hf == shape_gluon
), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__UpperCamelCase =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__UpperCamelCase =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__UpperCamelCase =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__UpperCamelCase =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__UpperCamelCase =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__UpperCamelCase =hf_bort_model.bert.encoder.layer[i]
# self attention
__UpperCamelCase =layer.attention.self
__UpperCamelCase =check_and_map_params(
self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__UpperCamelCase =check_and_map_params(
self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__UpperCamelCase =check_and_map_params(
self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__UpperCamelCase =check_and_map_params(
self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__UpperCamelCase =check_and_map_params(
self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__UpperCamelCase =check_and_map_params(
self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__UpperCamelCase =layer.attention.output
__UpperCamelCase =check_and_map_params(
self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
__UpperCamelCase =check_and_map_params(
self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
__UpperCamelCase =check_and_map_params(
self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
__UpperCamelCase =check_and_map_params(
self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__UpperCamelCase =layer.intermediate
__UpperCamelCase =check_and_map_params(
intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__UpperCamelCase =check_and_map_params(
intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__UpperCamelCase =layer.output
__UpperCamelCase =check_and_map_params(
bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__UpperCamelCase =check_and_map_params(
bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__UpperCamelCase =check_and_map_params(
bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__UpperCamelCase =check_and_map_params(
bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__UpperCamelCase =RobertaTokenizer.from_pretrained('roberta-base' )
__UpperCamelCase =tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ )['input_ids']
# Get gluon output
__UpperCamelCase =mx.nd.array([input_ids] )
__UpperCamelCase =original_bort(inputs=SCREAMING_SNAKE_CASE__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
hf_bort_model.eval()
__UpperCamelCase =tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__UpperCamelCase =hf_bort_model(**SCREAMING_SNAKE_CASE__ )[0]
__UpperCamelCase =output_gluon[0].asnumpy()
__UpperCamelCase =output_hf[0].detach().numpy()
__UpperCamelCase =np.max(np.abs(hf_layer - gluon_layer ) ).item()
__UpperCamelCase =np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
if success:
print('✔️ Both models output the same tensors' )
else:
print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_A = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
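# Hypothetical invocation (the script name and paths are placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort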
| 682 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
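# When installed, this module backs the `accelerate test` subcommand, e.g.:
#   accelerate test --config_file /path/to/default_config.yaml
# (the config path is a placeholder; without --config_file the default location is used).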
| 682 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_=None , **A_ ) -> Union[str, Any]:
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , A_ , )
super().__init__(args=A_ , **A_ )
| 682 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
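# Illustrative effect of the renaming above (the key is hypothetical): after stripping the
# leading 'target' prefix, a key like 'encoder.layers_0.attention.query.kernel' becomes
# 'encoder.encoder.layer.0.attention.query.weight'.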
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Whether the checkpoint is a VQA (visual question answering) model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
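# Hypothetical invocation (the script name and paths are placeholders):
#   python convert_pix2struct_original_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base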
| 682 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_A = 50_0000
_A , _A = os.path.split(__file__)
_A = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : datasets.Dataset , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =dataset.map(**SCREAMING_SNAKE_CASE__ )
@get_duration
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : datasets.Dataset , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =dataset.filter(**SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
__UpperCamelCase ={'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase =datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
__UpperCamelCase =generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE__ , 'dataset.arrow' ) , SCREAMING_SNAKE_CASE__ , num_examples=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=SCREAMING_SNAKE_CASE__ )
def tokenize(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return tokenizer(examples['text'] )
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=SCREAMING_SNAKE_CASE__ )
with dataset.formatted_as(type='numpy' ):
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=SCREAMING_SNAKE_CASE__ )
with dataset.formatted_as(type='pandas' ):
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=SCREAMING_SNAKE_CASE__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=SCREAMING_SNAKE_CASE__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =map(SCREAMING_SNAKE_CASE__ , function=SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =filter(SCREAMING_SNAKE_CASE__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 682 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_ ) -> List[Any]:
with open(A_ , encoding='utf-8' ) as input_file:
__UpperCamelCase =re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
__UpperCamelCase =input_file.read()
__UpperCamelCase =regexp.search(A_ )
return match
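# Illustrative matches for the pattern above: a call such as ` open(path)` (no encoding
# keyword and no binary/write mode token on the line) is flagged, while
# ` open(path, encoding='utf-8')` or ` open(path, 'rb')` is not, because the negative
# lookahead rejects lines containing one of those tokens.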
def _a ( self , A_ ) -> Dict:
with open(A_ , encoding='utf-8' ) as input_file:
__UpperCamelCase =re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
__UpperCamelCase =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCamelCase =regexp.finditer(A_ )
__UpperCamelCase =[match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
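# The alternation above deliberately matches (and then discards) commented-out prints,
# prints inside string literals, and prints inside triple-quoted docstrings; only a bare
# `print(` call populates group 1, which is why the filter keeps matches whose group(1)
# is not None.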
def _a ( self ) -> Dict:
__UpperCamelCase =Path('./datasets' )
__UpperCamelCase =list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A_ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def _a ( self ) -> List[str]:
__UpperCamelCase =Path('./datasets' )
__UpperCamelCase =list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(A_ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
| 682 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
(
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
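# A minimal NumPy reference for the expected table above (an independent sketch of the
# concatenated sin/cos layout this layer is tested against, not the layer's own code):
#
#   import numpy as np
#   def sinusoid_table(n_pos, dim):
#       pos = np.arange(n_pos)[:, None]
#       i = np.arange(dim // 2)[None, :]
#       angle = pos / np.power(10000.0, 2 * i / dim)
#       return np.concatenate([np.sin(angle), np.cos(angle)], axis=-1)
#
# sinusoid_table(2, 6)[1] reproduces [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000].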
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
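# The rotary update verified below follows x' = x * cos + rotate_half(x) * sin, where
# rotate_half maps (x1, x2, x3, x4, ...) to (-x2, x1, -x4, x3, ...); this is a description
# of the reference math inferred from the test values, not a quote of the layer internals.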
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 682 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging if TF is installed
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 682 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
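# e.g. axpy(2, Vector([1, 2, 3]), Vector([1, 0, 1])) == Vector([3, 4, 7])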
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    __UpperCamelCase =[[0] * SCREAMING_SNAKE_CASE__ for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
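# Minimal usage sketch for the Matrix class above (a sketch assuming the
# de-obfuscated method names `component`, `minor`, `cofactor`, `determinant`;
# not runnable against the renamed `_a` methods):
#
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     (m + m).component(0, 1)    # -> 4
#     m * Vector([1, 1])         # row sums -> Vector (3, 7)
#     m.determinant()            # 1 * 4 - 2 * 3 -> -2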
| 682 | 1 |
from __future__ import annotations
_A = tuple[int, int, int]
_A = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
_A = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
_A = 'FOBHMDKEXQNRAULPGSJVTYICZW'
_A = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
_A = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
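# The reflector above is an involution with no fixed point: every letter maps
# to a different letter, and applying it twice returns the original. A quick
# sanity check (a sketch; `reflector` stands for the dict just defined):
#
#     assert all(reflector[reflector[c]] == c and reflector[c] != c for c in reflector)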
# -------------------------- extra rotors --------------------------
_A = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
_A = 'SGLCPQWZHKXAREONTFBVIYJUDM'
_A = 'HVSICLTYKQUBXDWAJZOMFGPREN'
_A = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
_A = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
_A = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : RotorPositionT , SCREAMING_SNAKE_CASE__ : RotorSelectionT , SCREAMING_SNAKE_CASE__ : str ):
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE__ ) )) < 3:
__UpperCamelCase =F'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(SCREAMING_SNAKE_CASE__ )
# Checks if rotor positions are valid
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
    __UpperCamelCase =F'First rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =F'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =F'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(SCREAMING_SNAKE_CASE__ )
# Validates string and returns dict
__UpperCamelCase =_plugboard(SCREAMING_SNAKE_CASE__ )
return rotpos, rotsel, pbdict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
    # Validates the plugboard string:
    # a) it must be a string
    # b) it must have an even length (so letter pairs can be made)
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =F'Plugboard setting isn\'t type string ({type(SCREAMING_SNAKE_CASE__ )})'
raise TypeError(SCREAMING_SNAKE_CASE__ )
elif len(SCREAMING_SNAKE_CASE__ ) % 2 != 0:
__UpperCamelCase =F'Odd number of symbols ({len(SCREAMING_SNAKE_CASE__ )})'
raise Exception(SCREAMING_SNAKE_CASE__ )
elif pbstring == "":
return {}
    __UpperCamelCase =pbstring.replace(' ' , '' )  # str.replace returns a new string, so keep the result
# Checks if all characters are unique
__UpperCamelCase =set()
for i in pbstring:
if i not in abc:
__UpperCamelCase =F'\'{i}\' not in list of symbols'
raise Exception(SCREAMING_SNAKE_CASE__ )
elif i in tmppbl:
__UpperCamelCase =F'Duplicate symbol ({i})'
raise Exception(SCREAMING_SNAKE_CASE__ )
else:
tmppbl.add(SCREAMING_SNAKE_CASE__ )
del tmppbl
# Created the dictionary
__UpperCamelCase ={}
for j in range(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 , 2 ):
__UpperCamelCase =pbstring[j + 1]
__UpperCamelCase =pbstring[j]
return pb
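# Example of the symmetric mapping built above (a sketch, assuming the
# function is the original `_plugboard`; input must be uppercase letters):
#
#     _plugboard('PICTURES')
#     # -> {'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C',
#     #     'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}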
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : RotorPositionT , SCREAMING_SNAKE_CASE__ : RotorSelectionT = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE__ : str = "" , ):
__UpperCamelCase =text.upper()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =_validator(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , plugb.upper() )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =rotor_position
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
__UpperCamelCase =[]
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
__UpperCamelCase =plugboard[symbol]
# rotor ra --------------------------
__UpperCamelCase =abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
__UpperCamelCase =rotora[index % len(SCREAMING_SNAKE_CASE__ )]
# rotor rb --------------------------
__UpperCamelCase =abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
__UpperCamelCase =rotora[index % len(SCREAMING_SNAKE_CASE__ )]
# rotor rc --------------------------
__UpperCamelCase =abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
__UpperCamelCase =rotora[index % len(SCREAMING_SNAKE_CASE__ )]
# reflector --------------------------
            # the reflector makes the substitution symmetric, which is why the
            # same machine both enciphers and deciphers
__UpperCamelCase =reflector[symbol]
# 2nd rotors
__UpperCamelCase =abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
__UpperCamelCase =abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
__UpperCamelCase =abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
__UpperCamelCase =plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =0
# else:
# pass
        # An error could also be raised instead:
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = 'This is my Python script that emulates the Enigma machine from WWII.'
_A = (1, 1, 1)
_A = 'pictures'
_A = (rotora, rotora, rotora)
_A = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 682 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
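# Minimal translation sketch for this tokenizer (hedged: it relies on the
# upstream M2M100 API, `M2M100Tokenizer` and `M2M100ForConditionalGeneration`,
# neither of which is defined in this file):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en')
#     model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
#     encoded = tokenizer('Hello world', return_tensors='pt')
#     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id('fr'))
#     tokenizer.batch_decode(generated, skip_special_tokens=True)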
| 682 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : int = False
def _a ( self , A_ , A_ , A_=False ) -> Optional[Any]:
__UpperCamelCase =super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
__UpperCamelCase =embedding_size
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =TFMobileBertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =TFMobileBertForMaskedLM(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFMobileBertForNextSentencePrediction(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =TFMobileBertForPreTraining(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFMobileBertForSequenceClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFMobileBertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFMobileBertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =TFMobileBertForQuestionAnswering(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
        ) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TFMobileBertModelTest.TFMobileBertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*A_ )
@slow
def _a ( self ) -> str:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCamelCase =TFMobileBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> str:
__UpperCamelCase =TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =[1, 6, 30522]
self.assertEqual(output.shape , A_ )
__UpperCamelCase =tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
| 682 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
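# Illustrative rewrite performed by the renaming above (a sketch for a
# hypothetical checkpoint key, assuming one patch-embedding bias has already
# been seen, i.e. an offset of 1):
#
#     'network.1.0.mlp.fc1.weight'
#         -> 'poolformer.encoder.block.0.0.output.conv1.weight'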
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 682 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
        ) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
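        # The rotation checked above follows the standard rotary (RoPE)
        # identity (a sketch of the math, not part of the original test):
        #
        #     q' = q * cos(theta) + rotate_half(q) * sin(theta)
        #     k' = k * cos(theta) + rotate_half(k) * sin(theta)
        #
        # so relative positions enter attention through the q' . k' scores.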
| 682 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
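    # Example call (a sketch: `haversine_distance` is the assumed original
    # name of the `_UpperCAmelCase` function above; the result is a
    # great-circle estimate in metres, and the coordinates are illustrative):
    #
    #     print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))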
| 682 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 1_00 , ):
__UpperCamelCase =x_start
__UpperCamelCase =fnc(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates curve as a sequence of linear lines and sums their length
__UpperCamelCase =(x_end - x_start) / steps + xa
__UpperCamelCase =fnc(SCREAMING_SNAKE_CASE__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__UpperCamelCase =xa
__UpperCamelCase =fxa
return length
if __name__ == "__main__":
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
_A = 10
while i <= 10_0000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
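# XNOR truth table: (0, 0) -> 1, (0, 1) -> 0, (1, 0) -> 0, (1, 1) -> 1;
# i.e. the output is 1 exactly when both inputs agree.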
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Iterable[str] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =iter(SCREAMING_SNAKE_CASE__ )
while True:
__UpperCamelCase =tuple(itertools.islice(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not chunk:
return
yield chunk
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =''.join([c.upper() for c in dirty if c in string.ascii_letters] )
__UpperCamelCase =''
if len(SCREAMING_SNAKE_CASE__ ) < 2:
return dirty
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(SCREAMING_SNAKE_CASE__ ) & 1:
clean += "X"
return clean
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
__UpperCamelCase ='ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__UpperCamelCase =[]
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(SCREAMING_SNAKE_CASE__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(SCREAMING_SNAKE_CASE__ )
return table
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =generate_table(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =prepare_input(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(SCREAMING_SNAKE_CASE__ , 2 ):
__UpperCamelCase , __UpperCamelCase =divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
__UpperCamelCase , __UpperCamelCase =divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =generate_table(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(SCREAMING_SNAKE_CASE__ , 2 ):
__UpperCamelCase , __UpperCamelCase =divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
__UpperCamelCase , __UpperCamelCase =divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
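# Round-trip sketch (assuming the original names `encode`/`decode` for the two
# cipher functions above; not runnable against the renamed `_UpperCAmelCase`):
#
#     ciphertext = encode('secret message', 'playfair example')
#     decode(ciphertext, 'playfair example')
#     # -> the cleaned plaintext, with 'X' padding between doubled letters
#     #    and at an odd-length end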
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
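    # A right bound of 0 (the default) is treated as "search up to the last
    # index"; note that an explicit right=0 therefore also widens the range.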
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
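# Two-ended linear search: each call inspects both ends of the remaining
# range, so the recursion depth is at most len(list_data) / 2. Example calls
# (a sketch, assuming the original name `search` used in the recursion above):
#
#     search([1, 4, 8, 15], 8)    # -> 2
#     search([1, 4, 8, 15], 7)    # -> -1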
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase,  # config
            __UpperCamelCase,  # input_ids
            __UpperCamelCase,  # token_type_ids
            __UpperCamelCase,  # input_lengths
            __UpperCamelCase,  # sequence_labels
            __UpperCamelCase,  # token_labels
            __UpperCamelCase,  # is_impossible_labels
            __UpperCamelCase,  # choice_labels
            __UpperCamelCase,  # input_mask
        ) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
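# The common test suite below runs every TF Flaubert head through the shared model tester,
# plus a slow smoke test that loads the first pretrained checkpoint from the archive map.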
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
            pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
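        # No body needed here; the per-dtype tests below exercise the processor end to end.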
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
        # with apply_ocr = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
        # with apply_ocr = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
_A = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
__UpperCamelCase =tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase =test_hf_cache_home / 'datasets'
__UpperCamelCase =test_hf_cache_home / 'metrics'
__UpperCamelCase =test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(SCREAMING_SNAKE_CASE__ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(SCREAMING_SNAKE_CASE__ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(SCREAMING_SNAKE_CASE__ ) )
__UpperCamelCase =test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE__ ) )
__UpperCamelCase =test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE__ ) )
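# The autouse fixture above redirects every Hugging Face cache path (datasets, metrics,
# modules, downloads, extracted archives) into a throwaway pytest tmp directory.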
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ , scope='session' )
def _UpperCAmelCase ( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , SCREAMING_SNAKE_CASE__ )
| 682 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
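    # Serialize like the parent TrainingArguments, but flatten any nested GenerationConfig
    # value into a plain dict so the result stays JSON-serializable.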
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
| 682 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
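    # RoCBert keeps three aligned vocabularies (token, shape, pronunciation); setUp writes
    # identity mappings so all three id spaces coincide in these tests.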
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
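# The vision config below mirrors the text config's loader: when reading a composite
# "blip" checkpoint, only its `vision_config` sub-dict is used.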
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
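# Minimal usage sketch, assuming the three classes above correspond to transformers'
# BlipTextConfig / BlipVisionConfig / BlipConfig:
#   config = BlipConfig(text_config={"vocab_size": 30524}, vision_config={"image_size": 384})
#   assert config.to_dict()["text_config"]["vocab_size"] == 30524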
| 682 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FlaxAutoencoderKL
@property
def _a ( self ) -> int:
__UpperCamelCase =4
__UpperCamelCase =3
__UpperCamelCase =(32, 32)
__UpperCamelCase =jax.random.PRNGKey(0 )
__UpperCamelCase =jax.random.uniform(A_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _a ( self ) -> Optional[Any]:
__UpperCamelCase ={
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__UpperCamelCase =self.dummy_input
return init_dict, inputs_dict
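# dummy_input above pairs a random (4, 3, 32, 32) sample with a JAX PRNG key; together
# with the init dict below it feeds the shared FlaxModelTesterMixin checks.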
| 682 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 1 |
from typing import Any
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> Tuple:
__UpperCamelCase =data
__UpperCamelCase =None
def __repr__( self ) -> str:
return f'Node({self.data})'
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =None
def __iter__( self ) -> Any:
__UpperCamelCase =self.head
while node:
yield node.data
__UpperCamelCase =node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(A_ ) for item in self] )
def __getitem__( self , A_ ) -> Any:
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , A_ , A_ ) -> None:
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase =self.head
for _ in range(A_ ):
__UpperCamelCase =current.next
__UpperCamelCase =data
def _a ( self , A_ ) -> None:
self.insert_nth(len(self ) , A_ )
def _a ( self , A_ ) -> None:
self.insert_nth(0 , A_ )
def _a ( self , A_ , A_ ) -> None:
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase =Node(A_ )
if self.head is None:
__UpperCamelCase =new_node
elif index == 0:
__UpperCamelCase =self.head # link new_node to head
__UpperCamelCase =new_node
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =new_node
def _a ( self ) -> None: # print every node data
print(self )
def _a ( self ) -> Any:
return self.delete_nth(0 )
def _a ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def _a ( self , A_ = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase =self.head # default first node
if index == 0:
__UpperCamelCase =self.head.next
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next.next
return delete_node.data
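    # delete_nth walks to the node before `index`, relinks around the target and returns
    # its payload; index 0 is special-cased to advance the head pointer instead.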
def _a ( self ) -> bool:
return self.head is None
def _a ( self ) -> None:
__UpperCamelCase =None
__UpperCamelCase =self.head
while current:
# Store the current node's next node.
__UpperCamelCase =current.next
# Make the current node's next point backwards
__UpperCamelCase =prev
# Make the previous node be the current node
__UpperCamelCase =current
# Make the current node the next node (to progress iteration)
__UpperCamelCase =next_node
# Return prev in order to put the head at the end
__UpperCamelCase =prev
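    # reverse() above runs in O(n) time and O(1) extra space: only the `next` pointers
    # and the head reference change, no nodes are allocated.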
def _UpperCAmelCase ( ):
__UpperCamelCase =LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(SCREAMING_SNAKE_CASE__ ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE__ , i + 1 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(SCREAMING_SNAKE_CASE__ ) == 9
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase =-i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(-8 , 1 ) )
def _UpperCAmelCase ( ):
__UpperCamelCase =[
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-192.55555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase =LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase =linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase =linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase =linked_list.delete_nth(10 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _UpperCAmelCase ( ):
from doctest import testmod
testmod()
__UpperCamelCase =LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(SCREAMING_SNAKE_CASE__ )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
__UpperCamelCase =input('Enter New Value: ' ).strip()
print('New list:' )
print(SCREAMING_SNAKE_CASE__ )
print(F'length of linked_list is : {len(SCREAMING_SNAKE_CASE__ )}' )
if __name__ == "__main__":
main()
| 682 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
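# floats_list fills a (shape[0], shape[1]) nested list with rng.random() * scale, seeded
# through the module-level `global_rng` unless a custom rng is supplied.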
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
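    # With equal_length=False the generated waveforms grow from min_seq_length to
    # max_seq_length in fixed steps, so padding and truncation paths are both exercised.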
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
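    # Shape arithmetic behind the feature-size assertions above (derived from the
    # tester defaults; WhisperFeatureExtractor computes n_samples = chunk_length *
    # sampling_rate and nb_max_frames = n_samples // hop_length):
    # 8 * 4000 = 32000 samples and 32000 // 160 = 200 frames, so padded features
    # come out as (batch, feature_size=10, 200).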
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
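    # Normalization sketch: zero_mean_unit_var_norm subtracts the per-sequence
    # mean and divides by the standard deviation (with a small epsilon for
    # numerical stability), so even the [0, 65535]-rescaled signal above comes
    # back with mean ~0 and variance ~1.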
| 682 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
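# Lazy-import sketch: at runtime the module object in sys.modules is swapped for
# a _LazyModule, so the tokenizer class is only imported on first attribute
# access; under TYPE_CHECKING the real import runs purely for static analyzers.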
| 682 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
    def prepare_config_and_inputs(self):
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')

MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "encoder.layers.0.conv") down to the target module.
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
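# Hand-checked examples of the three branches above (the keys are illustrative):
#     should_ignore("decoder.final.weight", ["decoder.final.*"])       -> True  (prefix glob)
#     should_ignore("encoder.layers.3.conv.bias", ["encoder.*.conv"])  -> True  (infix glob)
#     should_ignore("quantizer.vq.embed_avg", ["embed_avg"])           -> True  (plain substring)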
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    # Fixed here: the original `model_name == "encodec_24khz" or "encodec_32khz"`
    # was always truthy, so the 48 kHz branch could never be selected.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
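# Wildcard substitution, traced by hand: for name "quantizer.vq.layers.3._codebook.embed",
# the key "quantizer.vq.layers.*._codebook.embed" reduces to the suffix
# "_codebook.embed"; name.split(key)[0].split('.')[-2] then recovers the layer
# index "3", so the mapped key becomes "quantizer.layers.3.codebook.embed".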
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s, overlap=config.overlap)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
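# Example invocation (a sketch; the script name and file paths are placeholders):
#     python convert_encodec_checkpoint.py --model encodec_24khz \
#         --checkpoint_path encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec_24khz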
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 682 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # Note: @patch decorators apply bottom-up, so `file` patches builtins.open
    # and `sock` patches socket.socket.
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # first read returns a truthy chunk, second ends the send loop
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 682 | 1 |
def solution(length: int = 50) -> int:
    """Counts the tilings of a row of `length` units using unit squares and
    oblong tiles of length 2, 3 or 4, via bottom-up dynamic programming."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
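# Sanity values, computed by hand from the recurrence above:
# solution(1) = 1, solution(2) = 2, solution(3) = 4, solution(4) = 8, solution(5) = 15
# (once four terms exist, each term is the sum of the previous four, with solution(0) = 1).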
| 682 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Secant method: refine two initial guesses until successive iterates
    differ by less than 1e-5."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
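# With these starting points the secant iteration converges to roughly 2.0945515,
# the real root of x**3 - 2*x - 5 (a classic test equation for root finders).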
| 682 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
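        # Worked example with the defaults above: (16 - 2) // 2 + 1 = 8 frequency
        # patches and (24 - 2) // 2 + 1 = 12 time patches, so num_patches = 96
        # and seq_length = 98.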
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : str = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Dict = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
def _a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def _a ( self ) -> Tuple:
pass
def _a ( self ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['input_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@slow
def _a ( self ) -> str:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ASTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> str:
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =self.default_feature_extractor
__UpperCamelCase =ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(A_ )
__UpperCamelCase =self.default_feature_extractor
__UpperCamelCase , __UpperCamelCase =prepare_audio()
__UpperCamelCase =audio.squeeze().numpy()
__UpperCamelCase =feature_extractor(A_ , sampling_rate=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-0.8760, -7.0042, -8.6602] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
| 682 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
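    # Design note: routing each query to one uniformly-chosen Ray actor spreads
    # retrieval load across the cluster, so concurrent training workers do not
    # serialize on a single in-process index.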
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 682 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
"""simple docstring"""
def __init__( self , A_ , A_=2 , A_=3 , A_=4 , A_=2 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=36 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=6 , A_=6 , A_=3 , A_=4 , A_=None , A_=1000 , ) -> List[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =coordinate_size
__UpperCamelCase =shape_size
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
__UpperCamelCase =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCamelCase =text_seq_length
__UpperCamelCase =(image_size // patch_size) ** 2 + 1
__UpperCamelCase =self.text_seq_length + self.image_seq_length
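        # Worked example with the defaults above: image_seq_length = (4 // 2) ** 2 + 1 = 5
        # and seq_length = 7 + 5 = 12.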
    def prepare_config_and_inputs(self):
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__UpperCamelCase =bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCamelCase =bbox[i, j, 3]
__UpperCamelCase =bbox[i, j, 1]
__UpperCamelCase =tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCamelCase =bbox[i, j, 2]
__UpperCamelCase =bbox[i, j, 0]
__UpperCamelCase =tmp_coordinate
__UpperCamelCase =tf.constant(A_ )
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCamelCase =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, training=False)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            training=False)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
__UpperCamelCase =copy.deepcopy(A_ )
if model_class in get_values(A_ ):
__UpperCamelCase ={
k: tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(A_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A_ ):
__UpperCamelCase =tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
if getattr(A_ , 'hf_compute_loss' , A_ ):
# The number of elements in the loss should be the same as the number of elements in the label
__UpperCamelCase =self._prepare_for_class(inputs_dict.copy() , A_ , return_labels=A_ )
__UpperCamelCase =prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=A_ )[0]
]
__UpperCamelCase =added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__UpperCamelCase =self._prepare_for_class(inputs_dict.copy() , A_ , return_labels=A_ )
__UpperCamelCase =prepared_for_class.pop('input_ids' )
__UpperCamelCase =model(A_ , **A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__UpperCamelCase =self._prepare_for_class(inputs_dict.copy() , A_ , return_labels=A_ )
__UpperCamelCase =prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__UpperCamelCase =prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__UpperCamelCase =-100
__UpperCamelCase =tf.convert_to_tensor(A_ )
__UpperCamelCase =model(A_ , **A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__UpperCamelCase =self._prepare_for_class(inputs_dict.copy() , A_ , return_labels=A_ )
__UpperCamelCase =model(A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__UpperCamelCase =self._prepare_for_class(inputs_dict.copy() , A_ , return_labels=A_ )
# Get keys that were added with the _prepare_for_class function
__UpperCamelCase =prepared_for_class.keys() - inputs_dict.keys()
__UpperCamelCase =inspect.signature(model.call ).parameters
__UpperCamelCase =list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__UpperCamelCase ={0: 'input_ids'}
for label_key in label_keys:
__UpperCamelCase =signature_names.index(A_ )
__UpperCamelCase =label_key
__UpperCamelCase =sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__UpperCamelCase =[]
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__UpperCamelCase =prepared_for_class[value]
__UpperCamelCase =tuple(A_ )
# Send to model
__UpperCamelCase =model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
         sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
@slow
def _a ( self ) -> str:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFLayoutLMvaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Optional[int]:
return LayoutLMvaImageProcessor(apply_ocr=A_ ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
__UpperCamelCase =TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='tf' ).pixel_values
__UpperCamelCase =tf.constant([[1, 2]] )
__UpperCamelCase =tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__UpperCamelCase =model(input_ids=A_ , bbox=A_ , pixel_values=A_ , training=A_ )
# verify the logits
__UpperCamelCase =(1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , A_ )
__UpperCamelCase =tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , A_ , atol=1E-4 ) )
| 682 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 1 |
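# Brute-force inversion counter: checks every pair (i, j) with i < j, O(n^2) time.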
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =0
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
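# Divide-and-conquer counter: splits the array, counts inversions in each half and across the split, O(n log n).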
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return arr, 0
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // 2
__UpperCamelCase =arr[0:mid]
__UpperCamelCase =arr[mid:]
__UpperCamelCase , __UpperCamelCase =count_inversions_recursive(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =count_inversions_recursive(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =_count_cross_inversions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
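# Merge step: merges the two sorted halves p and q while counting cross pairs with p[i] > q[j].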
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[]
__UpperCamelCase =__UpperCamelCase =__UpperCamelCase =0
while i < len(SCREAMING_SNAKE_CASE__ ) and j < len(SCREAMING_SNAKE_CASE__ ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for every k with i <= k < len(p),
# so all of those pairs are inversions. The claim follows from the
# property that p is sorted.
num_inversion += len(SCREAMING_SNAKE_CASE__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
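# Self-test: the brute-force and recursive implementations must agree on arrays with known inversion counts.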
def _UpperCAmelCase ( ):
__UpperCamelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__UpperCamelCase =count_inversions_bf(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =count_inversions_recursive(SCREAMING_SNAKE_CASE__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , SCREAMING_SNAKE_CASE__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__UpperCamelCase =count_inversions_bf(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =count_inversions_recursive(SCREAMING_SNAKE_CASE__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE__ )
# an empty list should also have zero inversions
__UpperCamelCase =[]
__UpperCamelCase =count_inversions_bf(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =count_inversions_recursive(SCREAMING_SNAKE_CASE__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
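# Copies the weights of the matching timm LeViT checkpoint into the HF model and checks that the logits agree.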
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
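# Builds the per-variant LeViT configs and the ImageNet label maps, then converts one named checkpoint or all of them.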
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(k ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 1 |
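# Manhattan (L1) distance between two n-dimensional points, with input validation.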
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
_validate_point(SCREAMING_SNAKE_CASE__ )
_validate_point(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
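# Rejects missing inputs, non-list inputs, and non-numeric coordinates before a distance is computed.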
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[float] ):
if point:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for item in point:
if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ):
__UpperCamelCase =(
'Expected a list of numbers as input, found '
F'{type(SCREAMING_SNAKE_CASE__ ).__name__}'
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =F'Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE__ ).__name__}'
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError('Missing an input' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
_validate_point(SCREAMING_SNAKE_CASE__ )
_validate_point(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__UpperCamelCase =get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(A_ ) , torch_builtin(A_ ) ) )
self.assertFalse(torch.allclose(gelu_python(A_ ) , gelu_new(A_ ) ) )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__UpperCamelCase =get_activation('gelu' )
__UpperCamelCase =get_activation('gelu_10' )
__UpperCamelCase =torch_builtin(A_ )
__UpperCamelCase =geluaa(A_ )
__UpperCamelCase =torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(A_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _a ( self ) -> int:
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(A_ ):
get_activation('bogus' )
with self.assertRaises(A_ ):
get_activation(A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =get_activation('gelu' )
__UpperCamelCase =1
__UpperCamelCase =get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(A_ ):
__UpperCamelCase =acta.a
| 682 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
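# CLI plumbing for `accelerate test`: the first function builds the arg parser, the second launches the bundled end-to-end test script.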
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 | 1 |
import os
def _UpperCAmelCase ( ):
with open(os.path.dirname(SCREAMING_SNAKE_CASE__ ) + '/p022_names.txt' ) as file:
__UpperCamelCase =str(file.readlines()[0] )
__UpperCamelCase =names.replace('"' , '' ).split(',' )
names.sort()
__UpperCamelCase =0
__UpperCamelCase =0
for i, name in enumerate(SCREAMING_SNAKE_CASE__ ):
for letter in name:
name_score += ord(SCREAMING_SNAKE_CASE__ ) - 64
total_score += (i + 1) * name_score
__UpperCamelCase =0
return total_score
if __name__ == "__main__":
print(solution())
| 682 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
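# Loads a T5X checkpoint and flattens its nested parameter dict (keys become tuples of path components).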
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
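# Renames the flattened T5X keys to HF Pix2Struct names via the two mapping tables below, then converts the
# arrays to torch tensors (transposing every weight matrix except the embeddings).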
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Convert a VQA checkpoint.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase__ :
"""simple docstring"""
pass
| 682 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Any:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Tuple:
pass
def _a ( self ) -> Optional[int]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> str:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Tuple:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Dict:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
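# The two classes below check TFRoFormer's sinusoidal position embeddings and its rotary
# position-embedding math against precomputed reference values.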
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 682 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
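# Generates the train/objective splits, measures GPT-2 perplexity on the objective set, and records
# (context, information-gain) pairs for training the secondary learner.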
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : List[Any]=1_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=10_26 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Tuple="data/tokenized_stories_train_wikitext103.jbl" , SCREAMING_SNAKE_CASE__ : Dict="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
__UpperCamelCase , __UpperCamelCase =generate_datasets(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__UpperCamelCase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
__UpperCamelCase =load_gpta('gpt2' ).to(SCREAMING_SNAKE_CASE__ )
print('computing perplexity on objective set' )
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).item()
print('perplexity on objective set:' , SCREAMING_SNAKE_CASE__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
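# Trains the secondary learner (the IGF filter) on the collected pairs, reusing GPT-2's embedding weights.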
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=15 , SCREAMING_SNAKE_CASE__ : Dict=1_28 , SCREAMING_SNAKE_CASE__ : List[str]=1_00 , SCREAMING_SNAKE_CASE__ : List[Any]="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
__UpperCamelCase =GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
__UpperCamelCase =SecondaryLearner(SCREAMING_SNAKE_CASE__ )
# Train secondary learner
__UpperCamelCase =train_secondary_learner(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_epochs=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , eval_freq=1_00 , igf_model_path=SCREAMING_SNAKE_CASE__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
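# Fine-tunes GPT-2, optionally filtering training contexts through the secondary learner; the selection
# threshold drops after the first 10 batches.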
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=10_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : Dict=1.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=recopy_gpta , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gpt2_finetuned.pt" , ):
__UpperCamelCase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =max_steps // (len(SCREAMING_SNAKE_CASE__ )) + 1
__UpperCamelCase =0
__UpperCamelCase =torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =recopy_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE__ )
secondary_learner.eval()
__UpperCamelCase =[]
__UpperCamelCase =0
__UpperCamelCase =[]
__UpperCamelCase =[]
# Compute the performance of the transformer model at the beginning
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
test_perps.append(SCREAMING_SNAKE_CASE__ )
print('Test perplexity, step' , SCREAMING_SNAKE_CASE__ , ':' , SCREAMING_SNAKE_CASE__ )
for epoch in range(int(SCREAMING_SNAKE_CASE__ ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE__ ):
torch.cuda.empty_cache()
__UpperCamelCase =random.randint(0 , example.size(2 ) - context_len - 1 )
__UpperCamelCase =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =True
if secondary_learner is not None:
__UpperCamelCase =secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE__ ) )
# Simple non-constant threshold on the predicted IG(X) value: after 10
# global optimization steps the threshold is dropped to -1, loosening the
# secondary-learner filter so that more contexts pass through.
if global_step == 10:
__UpperCamelCase =-1
if predicted_q < threshold:
__UpperCamelCase =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__UpperCamelCase =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__UpperCamelCase =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
test_perps.append(SCREAMING_SNAKE_CASE__ )
print('Test perplexity, step' , SCREAMING_SNAKE_CASE__ , ':' , SCREAMING_SNAKE_CASE__ )
# Stop once more than 60 global optimization steps have been taken
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=SCREAMING_SNAKE_CASE__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=10_00 , type=SCREAMING_SNAKE_CASE__ , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=SCREAMING_SNAKE_CASE__ , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=SCREAMING_SNAKE_CASE__ , help='evaluate test-set perplexity of the model every eval_interval optimization steps' )
parser.add_argument(
'--number' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=10_26 , type=SCREAMING_SNAKE_CASE__ , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=SCREAMING_SNAKE_CASE__ , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=SCREAMING_SNAKE_CASE__ , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=SCREAMING_SNAKE_CASE__ , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collect n (context X, information gain IG(X)) pairs for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
__UpperCamelCase =joblib.load('data/IGF_values.jbl' )
# Train secondary learner
__UpperCamelCase =training_secondary_learner(
SCREAMING_SNAKE_CASE__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
__UpperCamelCase =GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__UpperCamelCase , __UpperCamelCase =generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE__ , secondary_learner=SCREAMING_SNAKE_CASE__ , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
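# A minimal, self-contained sketch (not part of the script above) of the core
# IGF filtering idea: keep a training context only while the secondary learner
# predicts its information gain IG(X) above a threshold, and loosen that
# threshold over time. All names below are illustrative assumptions.
def filter_contexts(contexts, predicted_igs, threshold):
    """Return only the contexts whose predicted IG(X) clears the threshold."""
    return [c for c, ig in zip(contexts, predicted_igs) if ig >= threshold]

# Early on the filter is strict; once the threshold drops, more contexts pass:
assert filter_contexts(['a', 'b', 'c'], [1.2, 0.3, -0.5], threshold=1.0) == ['a']
assert filter_contexts(['a', 'b', 'c'], [1.2, 0.3, -0.5], threshold=-1.0) == ['a', 'b', 'c']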
| 682 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
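# A minimal, self-contained sketch (independent of the classes above) of the
# angle computation the Vector class performs: cos(theta) = (x . y) / (|x| * |y|).
import math

def angle_between(x: list, y: list, deg: bool = False) -> float:
    dot = sum(a * b for a, b in zip(x, y))
    norm = math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y))
    theta = math.acos(dot / norm)
    return math.degrees(theta) if deg else theta

# Orthogonal vectors form a 90 degree angle:
assert round(angle_between([1.0, 0.0], [0.0, 1.0], deg=True)) == 90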
| 682 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
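# A minimal, standalone sketch (an illustrative stand-in, not the actual
# transformers _LazyModule) of the same deferred-import idea using PEP 562
# module-level __getattr__: the heavy import only happens on first access.
import importlib

_LAZY_ATTRS = {'sqrt': 'math'}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')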
| 682 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_dir} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
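# A minimal sketch (independent of the class above) of the M2M100-style
# special-token layout that set_src_lang_special_tokens produces: a language
# token as prefix and </s> as suffix around the encoded ids. The ids below
# are made up for illustration.
def wrap_with_lang_tokens(token_ids, lang_token_id, eos_token_id):
    return [lang_token_id] + token_ids + [eos_token_id]

assert wrap_with_lang_tokens([5, 6, 7], lang_token_id=128022, eos_token_id=2) == [128022, 5, 6, 7, 2]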
| 682 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_=1024 , A_=1024 , A_=3.6 ) -> Optional[int]:
__UpperCamelCase =tokenizer
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =dataset
__UpperCamelCase =seq_length
__UpperCamelCase =seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> str:
__UpperCamelCase =iter(self.dataset )
__UpperCamelCase =True
while more_examples:
__UpperCamelCase , __UpperCamelCase =[], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(A_ )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
__UpperCamelCase =False
break
__UpperCamelCase =tokenizer(A_ , truncation=A_ )['input_ids']
__UpperCamelCase =[]
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(A_ ) , self.seq_length ):
__UpperCamelCase =all_token_ids[i : i + self.seq_length]
if len(A_ ) == self.seq_length:
yield torch.tensor(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase ={'streaming': True}
__UpperCamelCase =load_dataset(args.dataset_name , split='train' , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =ConstantLengthDataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , seq_length=args.seq_length )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size )
return eval_dataloader
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
model.eval()
__UpperCamelCase =[]
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
with torch.no_grad():
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(SCREAMING_SNAKE_CASE__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__UpperCamelCase =torch.mean(torch.cat(SCREAMING_SNAKE_CASE__ ) )
try:
__UpperCamelCase =torch.exp(SCREAMING_SNAKE_CASE__ )
except OverflowError:
__UpperCamelCase =float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
_A = Accelerator()
# Parse configuration
_A = HfArgumentParser(EvaluationArguments)
_A = parser.parse_args()
set_seed(args.seed)
# Logging
_A = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_A = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_A = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_A = create_dataloader(args)
# Prepare everything with our `accelerator`.
_A , _A = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_A , _A = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 682 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
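# A minimal sketch of the checkpoint key-renaming idea used above: walk the
# state dict and rewrite each key with simple string substitutions. The key
# names below are illustrative, not PoolFormer's real layout.
from collections import OrderedDict

def rename_state_dict(state_dict, substitutions):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        for old, new in substitutions:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed

example = rename_state_dict({'network.0.proj.weight': 0}, [('network', 'encoder'), ('proj', 'projection')])
assert list(example) == ['encoder.0.projection.weight']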
| 682 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# get the image height and width in pixels
__UpperCamelCase , __UpperCamelCase =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
# read original image
_A = imread('image_data/lena.jpg', 1)
# convert to its negative
_A = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
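# The per-pixel loop above costs O(height * width) Python iterations; with
# NumPy the same negative can be computed in one vectorized expression,
# assuming an 8-bit image as loaded by cv2.imread: negative = 255 - img.
import numpy as np

demo = np.array([[[0, 128, 255]]], dtype=np.uint8)
assert (255 - demo == np.array([[[255, 127, 0]]], dtype=np.uint8)).all()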
| 682 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
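# A minimal, self-contained haversine sketch with the two coordinate pairs
# kept distinct, using the mean Earth radius (the function above additionally
# corrects the latitudes for the ellipsoid's flattening):
from math import asin, cos, radians, sin, sqrt

def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    phi1, phi2 = radians(lat1), radians(lat2)
    d_phi = radians(lat2 - lat1)
    d_lambda = radians(lon2 - lon1)
    h = sin(d_phi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(d_lambda / 2) ** 2
    return 2 * 6371000 * asin(sqrt(h))  # mean Earth radius in metres

# A quarter turn along the equator is roughly 10,008 km:
assert round(haversine(0.0, 0.0, 0.0, 90.0) / 1000) == 10008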
| 682 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = "deta"
UpperCAmelCase__ : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , A_=None , A_=900 , A_=2048 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=1024 , A_=8 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=True , A_=False , A_="sine" , A_=5 , A_=4 , A_=4 , A_=True , A_=300 , A_=True , A_=True , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , A_=0.25 , **A_ , ) -> int:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__UpperCamelCase =CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(A_ , A_ ):
__UpperCamelCase =backbone_config.pop('model_type' )
__UpperCamelCase =CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase =config_class.from_dict(A_ )
__UpperCamelCase =backbone_config
__UpperCamelCase =num_queries
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =d_model
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =decoder_layers
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =init_xavier_std
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =auxiliary_loss
__UpperCamelCase =position_embedding_type
# deformable attributes
__UpperCamelCase =num_feature_levels
__UpperCamelCase =encoder_n_points
__UpperCamelCase =decoder_n_points
__UpperCamelCase =two_stage
__UpperCamelCase =two_stage_num_proposals
__UpperCamelCase =with_box_refine
__UpperCamelCase =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
__UpperCamelCase =class_cost
__UpperCamelCase =bbox_cost
__UpperCamelCase =giou_cost
# Loss coefficients
__UpperCamelCase =mask_loss_coefficient
__UpperCamelCase =dice_loss_coefficient
__UpperCamelCase =bbox_loss_coefficient
__UpperCamelCase =giou_loss_coefficient
__UpperCamelCase =eos_coefficient
__UpperCamelCase =focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _a ( self ) -> int:
return self.encoder_attention_heads
@property
def _a ( self ) -> int:
return self.d_model
def _a ( self ) -> Dict:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.backbone_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
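# A minimal sketch of the nested-backbone-config dispatch performed in
# __init__ above: a dict carries its own 'model_type', which selects the
# config class to build. The registry entries here are illustrative stand-ins.
def resolve_backbone(config_dict, registry):
    kind = dict(config_dict)  # copy so the caller's dict is untouched
    model_type = kind.pop('model_type')
    return registry[model_type](**kind)

registry = {'resnet': dict}  # stand-in 'config class' for the demo
backbone = resolve_backbone({'model_type': 'resnet', 'depth': 50}, registry)
assert backbone == {'depth': 50}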
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
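# For 0/1 inputs, XNOR is equivalent to the negated bitwise XOR, a minimal
# alternative formulation of the gate above:
def xnor_bitwise(a: int, b: int) -> int:
    return 1 - (a ^ b)

assert all(xnor_bitwise(a, b) == (1 if a == b else 0) for a in (0, 1) for b in (0, 1))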
| 682 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =[
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase , __UpperCamelCase =emb.weight.shape
__UpperCamelCase =nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =emb.weight.data
return lin_layer
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
__UpperCamelCase =Namespace(**checkpoint['cfg']['model'] )
__UpperCamelCase =checkpoint['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =state_dict['decoder.embed_tokens.weight'].shape[0]
__UpperCamelCase ={key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
__UpperCamelCase =XGLMConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
__UpperCamelCase =XGLMForCausalLM(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_A = parser.parse_args()
_A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
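# A minimal sketch of the embedding-to-linear weight sharing performed above:
# the LM head reuses the token embedding matrix, so logits are computed
# against the same vectors used for input lookup.
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # share storage; no separate output matrix
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()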
| 682 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
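# A minimal, self-contained restatement of the recursion above: check both
# ends of the remaining window, then shrink it from both sides (linear time,
# at most about n/2 comparisons of each end).
def two_ended_search(data: list, key) -> int:
    left, right = 0, len(data) - 1
    while left <= right:
        if data[left] == key:
            return left
        if data[right] == key:
            return right
        left, right = left + 1, right - 1
    return -1

assert two_ended_search([4, 5, 6, 7], 7) == 3
assert two_ended_search([4, 5, 6, 7], 1) == -1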
| 682 | 1 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE__ ) ) as con:
__UpperCamelCase =con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
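# A minimal sqlite3 round-trip sketch of what the writer/reader pair above is
# tested for: rows written to a table come back unchanged when read again.
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER)')
con.executemany('INSERT INTO dataset VALUES (?, ?)', [('a', 1), ('b', 2)])
assert list(con.execute('SELECT * FROM dataset')) == [('a', 1), ('b', 2)]
con.close()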
| 682 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
        # with apply_ocr=False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
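# Summary sketch of the two modes exercised above (class name as used in this file;
# `image` stands for the document image the test prepared earlier):
# encoding = LayoutLMvaImageProcessor()(image, return_tensors='pt')                 # OCR on: words + boxes + pixel_values
# encoding = LayoutLMvaImageProcessor(apply_ocr=False)(image, return_tensors='pt')  # OCR off: pixel_values only, (1, 3, 224, 224)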
| 682 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser =subparsers.add_parser('env' )
    else:
        parser =argparse.ArgumentParser('Accelerate env command' )
    parser.add_argument(
        '--config_file' , default=None , help='The config file to use for the default values in the launching script.' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version =torch.__version__
    pt_cuda_available =torch.cuda.is_available()
    pt_xpu_available =is_xpu_available()
    pt_npu_available =is_npu_available()
    accelerate_config ='Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config =load_config_from_file(args.config_file ).to_dict()
    info ={
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
        'System RAM': F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info['GPU type'] =torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([F'- {prop}: {val}' for prop, val in info.items()] ) )
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    accelerate_config_str =(
        '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'\t{accelerate_config}'
    )
    print(accelerate_config_str )
    info['`Accelerate` configs'] =accelerate_config
    return info
def main( ):
    parser =env_command_parser()
    args =parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
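# Typical invocations of the command wired up above (the config path is illustrative):
# $ accelerate env
# $ accelerate env --config_file /path/to/accelerate_config.yaml
# Both print the "Copy-and-paste the text below in your GitHub issue" report.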
| 682 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
    def to_dict( self ):
        d =super().to_dict()
        for k, v in d.items():
            # GenerationConfig values are not JSON-serializable, so flatten them to dicts.
            if isinstance(v , GenerationConfig ):
                d[k] =v.to_dict()
        return d
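# Minimal instantiation sketch. The field names below follow transformers'
# Seq2SeqTrainingArguments, which this dataclass mirrors; `output_dir` comes from the
# parent TrainingArguments:
# args = Seq2SeqTrainingArguments(
#     output_dir='out',
#     predict_with_generate=True,
#     generation_max_length=128,   # overrides the model config's max_length
#     generation_num_beams=4,      # overrides the model config's num_beams
# )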
| 682 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.dummy_uncond_unet
__UpperCamelCase =PNDMScheduler()
__UpperCamelCase =PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pndm(generator=A_ , num_inference_steps=20 , output_type='numpy' ).images
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pndm(generator=A_ , num_inference_steps=20 , output_type='numpy' , return_dict=A_ )[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Dict:
__UpperCamelCase ='google/ddpm-cifar10-32'
__UpperCamelCase =UNetaDModel.from_pretrained(A_ )
__UpperCamelCase =PNDMScheduler()
__UpperCamelCase =PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pndm(generator=A_ , output_type='numpy' ).images
__UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase =np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
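# Inference sketch reusing the components of the slow test above (class names as
# imported in this file):
# unet = UNetaDModel.from_pretrained('google/ddpm-cifar10-32')
# pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
# image = pipe(num_inference_steps=50, output_type='numpy').images[0]  # (32, 32, 3)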
| 682 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfilt-large': (
        'https://huggingface.co/Salesforce/blip-vqa-base-capfilt/resolve/main/config.json'
    ),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flickr': 'https://huggingface.co/Salesforce/blip-itm-base-flickr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flickr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flickr/resolve/main/config.json'
    ),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
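# Composition sketch for the three configs above (class names as referenced in this
# file; every hyperparameter falls back to the __init__ defaults shown):
# text_cfg = BlipTextConfig()
# vision_cfg = BlipVisionConfig()
# blip_cfg = BlipConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())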
| 682 | 1 |
# fmt: off
_A = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_A = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt( message: str ) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt( message: str ) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    message ='Morse code here!'
    print(message )
    message =encrypt(message )
    print(message )
    message =decrypt(message )
    print(message )
if __name__ == "__main__":
main()
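# Hand-checked round trip against the table above: 'S' -> '...' and 'O' -> '---', so
# encrypt('SOS') == '... --- ...' and decrypt('... --- ...') == 'SOS'.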
| 682 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
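# Standalone usage sketch mirroring setUp above (the three files are the vocab,
# word-shape and word-pronunciation files written to tmpdirname):
# tok = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
# tokens = tok.tokenize('你好[SEP]你是谁')        # ['你', '好', '[SEP]', '你', '是', '谁']
# tok.convert_tokens_to_ids(tokens)               # [5, 6, 2, 5, 7, 8]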
| 682 | 1 |
_A = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ):
    # Return True if the sink t is reachable from the source s in the residual graph (plain BFS).
    visited =[False] * len(graph )
    queue =[s]
    visited[s] =True
    while queue:
        u =queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] =True
                parent[ind] =u
    return visited[t]
def mincut( graph , source , sink ):
    parent =[-1] * (len(graph ))
    max_flow =0
    res =[]
    temp =[i[:] for i in graph] # Keep the original capacities so saturated edges can be recovered later.
    while bfs(graph , source , sink , parent ):
        path_flow =float('Inf' )
        s =sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow =min(path_flow , graph[parent[s]][s] )
            s =parent[s]
        max_flow += path_flow
        # Walk the path again and update residual capacities.
        v =sink
        while v != source:
            u =parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v =parent[v]
    # Edges saturated by the flow: positive in the original graph, zero in the residual.
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
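# The adjacency matrix above is the classic CLRS flow network: its maximum s-t flow
# is 23. With the BFS order above, the saturated edges come out as
# [(1, 3), (4, 3), (4, 5)] -- a minimum cut of capacity 12 + 7 + 4 = 23, matching
# the max-flow/min-cut theorem.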
| 682 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
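# Feature-extraction sketch matching the integration test above (`audio` is a 1-D
# float waveform at 16 kHz, e.g. one of the samples from _load_datasamples):
# fe = WhisperFeatureExtractor()
# feats = fe(audio, return_tensors='pt').input_features  # log-mel frames, shape (1, 80, 3000)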
| 682 | 1 |
from __future__ import annotations
def p_series( nth_term: int | float | str , power: int | float | str ) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term =int(nth_term )
    power =int(power )
    series =[]
    for temp in range(int(nth_term ) ):
        # First term is 1; every later term is 1 / (n ** power).
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else '1' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = int(input('Enter the last number (nth term) of the P-Series'))
_A = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
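# Worked example for the function above: p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].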
| 682 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) =config_and_inputs
        inputs_dict ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
        __UpperCamelCase =tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
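# Inference sketch mirroring the integration test above:
# model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
# ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]])  # "J'aime flaubert !"
# hidden = model(ids)[0]                                                # shape (1, 8, 512)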
| 682 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = "gptj"
UpperCAmelCase__ : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , A_=50400 , A_=2048 , A_=4096 , A_=28 , A_=16 , A_=64 , A_=None , A_="gelu_new" , A_=0.0 , A_=0.0 , A_=0.0 , A_=1E-5 , A_=0.02 , A_=True , A_=50256 , A_=50256 , A_=False , **A_ , ) -> Tuple:
__UpperCamelCase =vocab_size
__UpperCamelCase =n_positions
__UpperCamelCase =n_embd
__UpperCamelCase =n_layer
__UpperCamelCase =n_head
__UpperCamelCase =n_inner
__UpperCamelCase =rotary_dim
__UpperCamelCase =activation_function
__UpperCamelCase =resid_pdrop
__UpperCamelCase =embd_pdrop
__UpperCamelCase =attn_pdrop
__UpperCamelCase =layer_norm_epsilon
__UpperCamelCase =initializer_range
__UpperCamelCase =use_cache
__UpperCamelCase =bos_token_id
__UpperCamelCase =eos_token_id
super().__init__(
bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ = "default" , A_ = None , A_ = False , ) -> Optional[int]:
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , 'pad_token_id' , A_ ):
# TODO: how to do that better?
__UpperCamelCase =0
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
__UpperCamelCase =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='inputs' )
__UpperCamelCase ={0: 'batch', 1: 'past_sequence + sequence'}
else:
__UpperCamelCase ={0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _a ( self ) -> int:
return self._config.n_layer
@property
def _a ( self ) -> int:
return self._config.n_head
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
__UpperCamelCase =super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
# We need to order the input in the way they appears in the forward()
__UpperCamelCase =OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase , __UpperCamelCase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__UpperCamelCase =seqlen + 2
__UpperCamelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCamelCase =[
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
__UpperCamelCase =common_inputs['attention_mask']
if self.use_past:
__UpperCamelCase =ordered_inputs['attention_mask'].dtype
__UpperCamelCase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def _a ( self ) -> int:
return 13
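# Export sketch using this ONNX config through the legacy transformers.onnx CLI
# (the feature name for the causal-LM task is an assumption of that workflow):
# $ python -m transformers.onnx --model=EleutherAI/gpt-j-6B --feature=causal-lm onnx/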
| 682 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected( file , sock ):
    # ===== initialization =====
    conn =Mock()
    sock.return_value.accept.return_value =conn, Mock()
    f =iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect =lambda _ : next(f )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 682 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ""
UpperCAmelCase__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCAmelCase__ : str = None # compression type in fsspec. ex: "gzip"
    UpperCAmelCase__ : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , A_ = "" , A_ = None , A_ = None , **A_ ) -> Optional[Any]:
super().__init__(self , **A_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCamelCase =fsspec.open(
A_ , mode='rb' , protocol=A_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCamelCase =os.path.basename(self.file.path.split('::' )[0] )
__UpperCamelCase =(
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
__UpperCamelCase =None
@classmethod
def _a ( cls , A_ ) -> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(A_ ).lstrip('/' )
def _a ( self ) -> List[str]:
if self.dir_cache is None:
            f ={**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache ={f['name']: f}
def _a ( self , A_ ) -> Dict:
return self.file.open().read()
def _a ( self , A_ , A_ = "rb" , A_=None , A_=True , A_=None , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =self._strip_protocol(A_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "bz2"
UpperCAmelCase__ : Union[str, Any] = "bz2"
UpperCAmelCase__ : Dict = ".bz2"
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = "gzip"
UpperCAmelCase__ : Optional[int] = "gzip"
UpperCAmelCase__ : List[Any] = ".gz"
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = "lz4"
UpperCAmelCase__ : List[Any] = "lz4"
UpperCAmelCase__ : Optional[int] = ".lz4"
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "xz"
UpperCAmelCase__ : Optional[Any] = "xz"
UpperCAmelCase__ : Dict = ".xz"
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "zstd"
UpperCAmelCase__ : Optional[int] = "zstd"
UpperCAmelCase__ : Union[str, Any] = ".zst"
def __init__( self , A_ , A_ = "rb" , A_ = None , A_ = None , A_ = DEFAULT_BLOCK_SIZE , **A_ , ) -> str:
super().__init__(
fo=A_ , mode=A_ , target_protocol=A_ , target_options=A_ , block_size=A_ , **A_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCamelCase =self.file.__enter__
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> Union[str, Any]:
__UpperCamelCase =file_
def __enter__( self ) -> Any:
self._file.__enter__()
return self
def __exit__( self , *A_ , **A_ ) -> List[str]:
self._file.__exit__(*A_ , **A_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _a ( self ) -> List[str]:
return next(self._file )
def __getattr__( self , A_ ) -> List[Any]:
return getattr(self._file , A_ )
def fixed_enter(*A_ , **A_ ):
return WrappedFile(_enter(*A_ , **A_ ) )
__UpperCamelCase =fixed_enter
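# Usage sketch via fsspec URL chaining, matching the protocol comment near the top
# of this file (the URL is illustrative):
# import fsspec
# with fsspec.open('gzip://file.txt::https://example.com/file.txt.gz', 'rt') as f:
#     text = f.read()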
| 682 |
import math
from collections.abc import Callable
def intersection( function: Callable[[float], float] , x0: float , x1: float ) -> float:
    x_n =x0
    x_n1 =x1
    while True:
        if x_n == x_n1 or function(x_n ) == function(x_n1 ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        # Secant update: intersect the chord through (x_n, f(x_n)) and (x_n1, f(x_n1)) with the x-axis.
        x_n2 =x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n =x_n1
        x_n1 =x_n2
def f( x: float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
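# f above is Wallis's classic cubic: its real root satisfies x**3 = 2*x + 5, i.e.
# x ~= 2.0945515, so the secant iteration started from (3, 3.5) should converge there.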
| 682 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
        __UpperCamelCase =PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        __UpperCamelCase =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
 parser.add_argument('--is_vqa', action='store_true', help='Use the VQA version of the model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
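    # Design note (descriptive, not in the original): routing each retrieve() call to a
    # uniformly random Ray actor spreads index lookups across the workers, so multiple
    # training processes can share one pool of retrieval actors without coordination.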
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 682 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_A = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , **A_ ) -> Optional[int]:
super().__init__(**A_ )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self , A_ , **A_ ) -> List[str]:
return super().__call__(A_ , **A_ )
def _a ( self , **A_ ) -> Dict:
__UpperCamelCase ={}
if "candidate_labels" in kwargs:
__UpperCamelCase =kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__UpperCamelCase =kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _a ( self , A_ , A_=None , A_="This is a sound of {}." ) -> Union[str, Any]:
if isinstance(A_ , A_ ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__UpperCamelCase =requests.get(A_ ).content
else:
with open(A_ , 'rb' ) as f:
__UpperCamelCase =f.read()
if isinstance(A_ , A_ ):
__UpperCamelCase =ffmpeg_read(A_ , self.feature_extractor.sampling_rate )
if not isinstance(A_ , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
__UpperCamelCase =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
__UpperCamelCase =candidate_labels
__UpperCamelCase =[hypothesis_template.format(A_ ) for x in candidate_labels]
__UpperCamelCase =self.tokenizer(A_ , return_tensors=self.framework , padding=A_ )
__UpperCamelCase =[text_inputs]
return inputs
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =model_inputs.pop('candidate_labels' )
__UpperCamelCase =model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , A_ ):
__UpperCamelCase =text_inputs[0]
else:
# Batching case.
__UpperCamelCase =text_inputs[0][0]
__UpperCamelCase =self.model(**A_ , **A_ )
__UpperCamelCase ={
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =model_outputs.pop('candidate_labels' )
__UpperCamelCase =model_outputs['logits'][0]
if self.framework == "pt":
__UpperCamelCase =logits.softmax(dim=0 )
__UpperCamelCase =probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
__UpperCamelCase =[
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(A_ , A_ ) , key=lambda A_ : -A_[0] )
]
return result
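# Minimal usage sketch (illustrative; the checkpoint name and audio path are assumptions):
#   from transformers import pipeline
#   classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
#   classifier('dog_bark.wav', candidate_labels=['dog barking', 'vacuum cleaner'])
# returns the score-sorted list of {'score': ..., 'label': ...} dicts built above.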
| 682 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
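        # Worked example (illustrative): with the default image_size=64 and an output stride
        # of 32, num_patches = (64 // 32) ** 2 = 4, so seq_length = 4 + 1 = 5 with the [CLS] token.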
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 1 |
import os
import pytest
from attr import dataclass
_A = 'us-east-1' # default region
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : str
UpperCAmelCase__ : Union[str, Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Tuple = {
"task_name": "mnli",
"per_device_train_batch_size": 1_6,
"per_device_eval_batch_size": 1_6,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_0_0,
"save_steps": 5_5_0_0,
}
UpperCAmelCase__ : Union[str, Any] = {**hyperparameters, "max_steps": 1_0_0_0}
@property
def _a ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
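    # Illustrative (hypothetical log line): the PyTorch 'train_runtime' regex above pulls the
    # trailing number out of a line like 'train_runtime = 12.3', i.e.
    # re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 12.3").group(1) == "12.3".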
@property
def _a ( self ) -> str:
        return f'{self.framework}-transformers-test'
@property
def _a ( self ) -> str:
return f'./tests/sagemaker/scripts/{self.framework}'
@property
def _a ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =SageMakerTestEnvironment(framework=request.cls.framework )
| 682 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
_A = logging.get_logger(__name__)
_A = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ) -> bool:
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ = None ) -> Any:
__UpperCamelCase =max_length
__UpperCamelCase =max_position_embeddings
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ) -> bool:
__UpperCamelCase =input_ids.shape[-1]
__UpperCamelCase =cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
'exceptions, performance degradation, or nothing at all.' )
return is_done
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ ) -> Any:
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
'with `max_length = start_length + max_new_tokens` instead.' , A_ , )
__UpperCamelCase =start_length
__UpperCamelCase =max_new_tokens
__UpperCamelCase =start_length + max_new_tokens
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ) -> bool:
return input_ids.shape[-1] >= self.max_length
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ = None ) -> Dict:
__UpperCamelCase =max_time
__UpperCamelCase =time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , **A_ ) -> bool:
return any(criteria(A_ , A_ ) for criteria in self )
@property
def _a ( self ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(A_ , A_ ):
return stopping_criterium.max_length
elif isinstance(A_ , A_ ):
return stopping_criterium.max_length
return None
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : StoppingCriteriaList , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =stopping_criteria.max_length
__UpperCamelCase =deepcopy(SCREAMING_SNAKE_CASE__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , SCREAMING_SNAKE_CASE__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE__ ) )
return new_stopping_criteria
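# Usage sketch (illustrative; assumes the upstream class names, which this file's own call
# sites such as MaxLengthCriteria above also rely on):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
#   criteria(input_ids, scores)  # True once input_ids.shape[-1] >= 10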
| 682 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=3 , A_=32 , A_=3 , A_=10 , A_=[8, 16, 32, 64] , A_=[1, 1, 2, 1] , A_=True , A_=True , A_="relu" , A_=3 , A_=None , A_=["stage2", "stage3", "stage4"] , A_=[2, 3, 4] , A_=1 , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =num_channels
__UpperCamelCase =embeddings_size
__UpperCamelCase =hidden_sizes
__UpperCamelCase =depths
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_act
__UpperCamelCase =num_labels
__UpperCamelCase =scope
__UpperCamelCase =len(A__ )
__UpperCamelCase =out_features
__UpperCamelCase =out_indices
__UpperCamelCase =num_groups
def _a ( self ) -> Dict:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Optional[int]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self , A_ , A_ , A_ ) -> str:
__UpperCamelCase =BitModel(config=A__ )
model.to(A__ )
model.eval()
__UpperCamelCase =model(A__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =BitForImageClassification(A__ )
model.to(A__ )
model.eval()
__UpperCamelCase =model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =BitBackbone(config=A__ )
model.to(A__ )
model.eval()
__UpperCamelCase =model(A__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCamelCase =None
__UpperCamelCase =BitBackbone(config=A__ )
model.to(A__ )
model.eval()
__UpperCamelCase =model(A__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Dict:
__UpperCamelCase =BitModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def _a ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> Tuple:
return
@unittest.skip(reason='Bit does not output attentions' )
def _a ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _a ( self ) -> str:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _a ( self ) -> Any:
pass
def _a ( self ) -> str:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A__ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A__ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A__ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A__ )
for name, module in model.named_modules():
if isinstance(A__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self ) -> List[str]:
def check_hidden_states_output(A_ , A_ , A_ ):
__UpperCamelCase =model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(A__ , A__ ) )
__UpperCamelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase =self.model_tester.num_stages
self.assertEqual(len(A__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCamelCase =layer_type
__UpperCamelCase =True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase =True
check_hidden_states_output(A__ , A__ , A__ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =BitModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> int:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self ) -> Any:
__UpperCamelCase =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A__ , return_tensors='pt' ).to(A__ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A__ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
__UpperCamelCase =torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
@require_torch
class UpperCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase__ : Any = BitConfig
UpperCAmelCase__ : str = False
def _a ( self ) -> Optional[int]:
__UpperCamelCase =BitModelTester(self )
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
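# Example invocation (illustrative): `accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml`
# runs the bundled test_script.py through `accelerate-launch`, as assembled below.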
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 | 0 |
import math
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return math.pow(__A , 2 ) - a
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
return 2 * x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =2.0
while start <= a:
__UpperCamelCase =math.pow(__A , 2 )
return start
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any = 99_99 , SCREAMING_SNAKE_CASE__ : Optional[int] = 0.00000000000001 ):
if a < 0:
raise ValueError('math domain error' )
__UpperCamelCase =get_initial_point(__A )
for _ in range(__A ):
__UpperCamelCase =value
__UpperCamelCase =value - fx(__A , __A ) / fx_derivative(__A )
if abs(prev_value - value ) < tolerance:
return value
return value
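# Descriptive note: the loop above is Newton's method on f(x) = x**2 - a, i.e.
# x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n); for a = 2 it converges to ~1.41421356.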
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
 parser.add_argument('--is_vqa', action='store_true', help='Use the VQA version of the model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_A = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_A = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_A = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
return float((preds == labels).mean() )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int="binary" ):
__UpperCamelCase =simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase =float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average=_SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase ={}
for id_pred, label in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__UpperCamelCase =F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
__UpperCamelCase =id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__UpperCamelCase =[(pred, label)]
__UpperCamelCase , __UpperCamelCase =[], []
for question, preds_labels in question_map.items():
__UpperCamelCase , __UpperCamelCase =zip(*_SCREAMING_SNAKE_CASE )
__UpperCamelCase =fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average='macro' )
fas.append(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =int(sum(pred == label for pred, label in preds_labels ) == len(_SCREAMING_SNAKE_CASE ) )
ems.append(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =float(sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) )
__UpperCamelCase =sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def _a ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def _a ( self , A_ , A_ ) -> Dict:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase_ , UpperCAmelCase_ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase_ , UpperCAmelCase_ , fa_avg='macro' )
elif self.config_name == "record":
__UpperCamelCase =[
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
__UpperCamelCase ={pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(UpperCAmelCase_ , UpperCAmelCase_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]' )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
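# --- Added illustration (hedged): how the lazy module above is consumed. ---
# A sketch only: with _LazyModule installed, users import TrOCR symbols from the
# package root and the torch-backed submodule loads on first attribute access.
if __name__ == '__main__':
    from transformers import TrOCRConfig  # resolved lazily through the structure above

    config = TrOCRConfig()  # default decoder configuration
    print(config.model_type)  # 'trocr'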
| 682 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =384
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =128
__UpperCamelCase =2
__UpperCamelCase =9
__UpperCamelCase =1
__UpperCamelCase =None
def _a ( self ) -> List[str]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =TFConvBertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any:
__UpperCamelCase =TFConvBertForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFConvBertForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFConvBertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFConvBertForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =TFConvBertForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> str:
        __UpperCamelCase =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =__UpperCamelCase
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFConvBertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =True
__UpperCamelCase =True
if hasattr(A_ , 'use_cache' ):
__UpperCamelCase =True
__UpperCamelCase =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__UpperCamelCase =getattr(self.model_tester , 'key_length' , A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =self._prepare_for_class(A_ , A_ )
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =len(model(A_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ , saved_model=A_ )
__UpperCamelCase =os.path.join(A_ , 'saved_model' , '1' )
__UpperCamelCase =tf.keras.models.load_model(A_ )
__UpperCamelCase =model(A_ )
if self.is_encoder_decoder:
__UpperCamelCase =outputs['encoder_hidden_states']
__UpperCamelCase =outputs['encoder_attentions']
else:
__UpperCamelCase =outputs['hidden_states']
__UpperCamelCase =outputs['attentions']
self.assertEqual(len(A_ ) , A_ )
__UpperCamelCase =getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) , A_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(A_ )
def _a ( self ) -> Any:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =True
__UpperCamelCase =getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__UpperCamelCase =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__UpperCamelCase =getattr(self.model_tester , 'key_length' , A_ )
__UpperCamelCase =getattr(self.model_tester , 'key_length' , A_ )
def check_decoder_attentions_output(A_ ):
__UpperCamelCase =len(A_ )
self.assertEqual(out_len % 2 , 0 )
__UpperCamelCase =outputs.decoder_attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(A_ ):
__UpperCamelCase =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =model(self._prepare_for_class(A_ , A_ ) )
__UpperCamelCase =len(A_ )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
if self.is_encoder_decoder:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_decoder_attentions_output(A_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
self.assertEqual(model.config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[Any]:
__UpperCamelCase =TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =[1, 6, 768]
self.assertEqual(output.shape , A_ )
__UpperCamelCase =tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
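# --- Added illustration (hedged): standalone version of the integration check above. ---
# A sketch only; mirrors the slow test: run the public checkpoint on a toy input
# and confirm the hidden-state shape the test asserts.
if __name__ == '__main__':
    import tensorflow as tf
    from transformers import TFConvBertModel

    model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
    hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(hidden.shape)  # expected (1, 6, 768), as asserted in the test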
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
        __UpperCamelCase =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =__UpperCamelCase
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
    def _a ( self , pipeline_test_casse_name , A_ , A_ , A_ , A_ ) -> Optional[Any]:
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
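# --- Added illustration (hedged): NumPy re-derivation of the sinusoidal table. ---
# A sketch only, inferred from the expected constants above: each row is
# sin(pos / 10000**(2i/d)) for the first half and the matching cos for the second,
# which reproduces [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000] at pos=1, d=6.
if __name__ == '__main__':
    import numpy as np

    def sinusoidal_table(num_positions, dim):
        positions = np.arange(num_positions)[:, None]
        inv_freq = 1.0 / np.power(10000.0, 2 * np.arange(dim // 2)[None, :] / dim)
        angles = positions * inv_freq
        return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

    print(np.round(sinusoidal_table(2, 6), 4))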
| 682 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_A = '''bert-base-cased'''
FPaa = '''fp16'''
BFaa = '''bf16'''
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase__ ( AccelerateTestCase ):
"""simple docstring"""
def _a ( self ) -> int:
super().setUp()
__UpperCamelCase =dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def _a ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =f'{i + 1}'
__UpperCamelCase =strategy
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _a ( self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =prefetch_policy
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _a ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =state_dict_type
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _a ( self ) -> str:
        __UpperCamelCase =AutoModel.from_pretrained(_A )
for policy in FSDP_AUTO_WRAP_POLICY:
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =policy
if policy == "TRANSFORMER_BASED_WRAP":
__UpperCamelCase ='BertLayer'
elif policy == "SIZE_BASED_WRAP":
__UpperCamelCase ='2000'
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase ='TRANSFORMER_BASED_WRAP'
__UpperCamelCase ='T5Layer'
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase ='SIZE_BASED_WRAP'
__UpperCamelCase ='0'
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _a ( self ) -> Any:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =mp_dtype
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =Accelerator()
if mp_dtype == "fp16":
__UpperCamelCase =torch.floataa
elif mp_dtype == "bf16":
__UpperCamelCase =torch.bfloataa
__UpperCamelCase =MixedPrecision(param_dtype=UpperCamelCase_ , reduce_dtype=UpperCamelCase_ , buffer_dtype=UpperCamelCase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCamelCase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase_ )
def _a ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__UpperCamelCase =self.dist_env.copy()
__UpperCamelCase =str(UpperCamelCase_ ).lower()
with mockenv_context(**UpperCamelCase_ ):
__UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase_ ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase__ ( TempDirTestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
super().setUp()
__UpperCamelCase =0.82
__UpperCamelCase =[
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
__UpperCamelCase ={
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__UpperCamelCase =160
__UpperCamelCase =160
__UpperCamelCase =inspect.getfile(accelerate.test_utils )
__UpperCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =os.path.join(self.test_scripts_folder , 'test_performance.py' )
__UpperCamelCase =['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
__UpperCamelCase =cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
__UpperCamelCase =[
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
__UpperCamelCase =cmd.copy()
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
__UpperCamelCase =len(UpperCamelCase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__UpperCamelCase =cmd_config[:state_dict_config_index]
cmd_config.append(f'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
__UpperCamelCase =cmd_config[:-1]
__UpperCamelCase =os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
def _a ( self ) -> Dict:
__UpperCamelCase =os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
__UpperCamelCase =[
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__UpperCamelCase =cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--peak_memory_upper_bound={peak_mem_upper_bound}',
f'--n_train={self.n_train}',
f'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
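# --- Added illustration (hedged): the launch command the performance test builds. ---
# A sketch only; the script path and output dir are placeholders, and the flag set
# is one concrete instantiation (FULL_SHARD is sharding strategy 1) of the loop above.
if __name__ == '__main__':
    launch_cmd = [
        'accelerate', 'launch',
        '--num_processes=2', '--num_machines=1', '--machine_rank=0',
        '--use_fsdp',
        '--fsdp_sharding_strategy=1',  # 1 == FULL_SHARD
        '--mixed_precision=fp16',
        '--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP',
        '--fsdp_transformer_layer_cls_to_wrap=BertLayer',
        'test_performance.py',  # placeholder script path
        '--output_dir=/tmp/fsdp_run',  # placeholder
    ]
    print(' '.join(launch_cmd))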
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
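# --- Added illustration (hedged): basic usage of the linear-algebra classes above. ---
# A sketch only: the identifiers in this file are mangled, so `Vector` and `Matrix`
# below stand for the presumed de-obfuscated class names.
if __name__ == '__main__':
    v = Vector([1.0, 2.0, 3.0])
    w = Vector([3.0, 2.0, 1.0])
    print(v + w)  # (4.0,4.0,4.0)
    print(v * w)  # dot product: 10.0
    print(v.euclidean_length())  # sqrt(14) ~= 3.7417
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 = -2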
| 682 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( BaseImageProcessor ):
"""simple docstring"""
UpperCAmelCase__ : str = ["pixel_values"]
def __init__( self , A_ = True , A_ = 32 , A_=PILImageResampling.BILINEAR , A_ = True , **A_ , ) -> Dict:
__UpperCamelCase =do_resize
__UpperCamelCase =do_rescale
__UpperCamelCase =size_divisor
__UpperCamelCase =resample
super().__init__(**__lowerCamelCase )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ ) -> Dict:
__UpperCamelCase =get_image_size(__lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__UpperCamelCase =height // size_divisor * size_divisor
__UpperCamelCase =width // size_divisor * size_divisor
__UpperCamelCase =resize(__lowerCamelCase , (new_h, new_w) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
return image
def _a ( self , A_ , A_ , A_ = None , **A_ ) -> List[str]:
return rescale(image=__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _a ( self , A_ , A_ = None , A_ = None , A_=None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> Dict:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =size_divisor if size_divisor is not None else self.size_divisor
__UpperCamelCase =resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__UpperCamelCase =make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(__lowerCamelCase ) for img in images]
if do_resize:
__UpperCamelCase =[self.resize(__lowerCamelCase , size_divisor=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(__lowerCamelCase , scale=1 / 255 ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
__UpperCamelCase ={"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
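# --- Added illustration (hedged): usage via the un-obfuscated counterpart. ---
# A sketch only: the class above appears to match transformers' GLPNImageProcessor
# (resize down to a size_divisor multiple, rescale by 1/255); that mapping is an assumption.
if __name__ == '__main__':
    import numpy as np
    from transformers import GLPNImageProcessor

    processor = GLPNImageProcessor(do_resize=True, size_divisor=32, do_rescale=True)
    image = np.zeros((65, 97, 3), dtype=np.uint8)
    batch = processor(image, return_tensors='np')
    print(batch['pixel_values'].shape)  # (1, 3, 64, 96): 65 and 97 floored to multiples of 32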
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
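# --- Added illustration (hedged): typical translation setup for this tokenizer. ---
# A sketch only: `M2M100Tokenizer` is assumed to be the un-obfuscated counterpart of
# the class above; the checkpoint name comes from the pretrained maps in this file.
if __name__ == '__main__':
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
    encoded = tokenizer('Hello world', return_tensors='pt')
    print(encoded['input_ids'])
    print(tokenizer.get_lang_id('fr'))  # pass as forced_bos_token_id at generation time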
| 682 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> int:
__UpperCamelCase =1
__UpperCamelCase =3
__UpperCamelCase =(32, 32)
__UpperCamelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ )
return image
@property
def _a ( self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def _a ( self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _a ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase_ )
@property
def _a ( self ) -> Optional[int]:
def extract(*A_ , **A_ ):
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> List[Any]:
__UpperCamelCase =torch.ones([0] )
def _a ( self , A_ ) -> List[Any]:
self.pixel_values.to(UpperCAmelCase_ )
return self
return Out()
return extract
def _a ( self ) -> Optional[int]:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.dummy_cond_unet
__UpperCamelCase =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
__UpperCamelCase =self.dummy_vae
__UpperCamelCase =self.dummy_text_encoder
__UpperCamelCase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__UpperCamelCase =StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase ='A painting of a squirrel eating a burger'
__UpperCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
__UpperCamelCase =sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__UpperCamelCase =output.images
__UpperCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> str:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.dummy_cond_unet
__UpperCamelCase =PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
__UpperCamelCase =self.dummy_vae
__UpperCamelCase =self.dummy_text_encoder
__UpperCamelCase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__UpperCamelCase =StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase ='A painting of a squirrel eating a burger'
__UpperCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
__UpperCamelCase =sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__UpperCamelCase =output.images
__UpperCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert isinstance(pipe.scheduler , UpperCAmelCase_ )
assert pipe.safety_checker is None
__UpperCamelCase =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
__UpperCamelCase =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.dummy_cond_unet
__UpperCamelCase =PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
__UpperCamelCase =self.dummy_vae
__UpperCamelCase =self.dummy_text_encoder
__UpperCamelCase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
__UpperCamelCase =unet.half()
__UpperCamelCase =vae.half()
__UpperCamelCase =bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase =StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase ='A painting of a squirrel eating a burger'
__UpperCamelCase =sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
__UpperCamelCase =StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_ )
__UpperCamelCase =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase =(
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__UpperCamelCase =4003660346
__UpperCamelCase =7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =[0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =[0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
__UpperCamelCase =StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_ )
__UpperCamelCase =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase ='padme amidala taking a bath artwork, safe for work, no nudity'
__UpperCamelCase =2734971755
__UpperCamelCase =7
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =[0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =[0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
__UpperCamelCase =StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
__UpperCamelCase =sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCamelCase =(
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__UpperCamelCase =1044355234
__UpperCamelCase =12
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase =torch.manual_seed(UpperCAmelCase_ )
__UpperCamelCase =sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase =output.images
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Renames `original_name` inside `key` while shifting the block index down by `offset`."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
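# Illustrative trace (not part of the original script): with a patch-embedding
# offset of 1, the checkpoint key "network.1.0.mlp.fc1.weight" first becomes
# "poolformer.encoder.1.0.mlp.fc1.weight" and is then mapped by
# replace_key_with_offset to "poolformer.encoder.block.0.0.output.conv1.weight".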
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
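    # Example invocation (illustrative; script and checkpoint file names are placeholders):
    #   python convert_poolformer_original_to_pytorch.py --model_name poolformer_s12 \
    #       --checkpoint_path poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12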
| 682 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) by numerically integrating x**(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
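    # Illustrative check (assumes the reconstructed names above): Gamma(n) equals
    # (n - 1)! for positive integers, so this should print a value close to 24.
    print(gamma(5))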
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in meters between two lat/lon points, using the haversine formula
    on latitudes reduced for the WGS-84 flattening."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
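# Illustrative example (not from the original module): the distance from a point to
# itself is 0.0, and San Francisco -> New York comes out to roughly 4.1e6 meters:
#   haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)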
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
"""simple docstring"""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 708 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: outputs 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
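# Note (illustrative): with the _LazyModule pattern above, importing the package
# stays cheap; the torch-backed `modeling_mra` module is only loaded on first
# attribute access, e.g. via `from <package> import MraModel`.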
| 709 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
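# Illustrative trace (assumes the fixes above): search([1, 2, 3, 4, 5], 3) checks
# the endpoints of the window [0, 4], then [1, 3], then hits the key at [2, 2] and
# returns index 2; a missing key returns -1 once left > right.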
| 682 | 0 |
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Runs `check_program` in a subprocess and records whether it passes."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
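# Illustrative usage of time_limit (assumes POSIX signal support, so not Windows):
#   with time_limit(1.0):
#       while True:
#           pass  # raises TimeoutException after roughly one second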
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
yield dirname
class TimeoutException(Exception):
    """Raised when `time_limit` expires."""
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is not a security sandbox; untrusted
    code should still be executed inside one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins

    builtins.exit = None
    builtins.quit = None
    import os

    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__['help'] = None
    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
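# Illustrative usage (assumes the reconstructed names above):
#   res = check_correctness('assert 1 + 1 == 2', timeout=3.0, task_id=0, completion_id=0)
#   res['passed']  # -> True; a program that never terminates yields result 'timed out'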
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 0 |
import requests
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs matching a given query."""
    formatted_query = "+".join(query.split())
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
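# Note: the placeholder API key above will not authenticate; with an invalid key the
# Giphy API most likely returns an error payload without a "data" field, so replace
# giphy_api_key with a real key from the Giphy developer dashboard before running.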
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """simple docstring"""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
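# Illustrative usage (output_dir is a placeholder):
#   args = Seq2SeqTrainingArguments(output_dir='out', predict_with_generate=True)
#   args.to_dict()  # any nested GenerationConfig value is serialized to a plain dict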
| 682 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f'val_{metric}',
        mode='max',
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f'val_{metric}',
        mode='min' if 'loss' in metric else 'max',
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip_text_model'

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act='gelu',
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act='gelu',
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
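# Illustrative usage (assumes the reconstructed class names above):
#   config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
#   config.to_dict()['text_config']['vocab_size']  # -> 30524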
| 682 | 0 |