# Dataset dump. Row schema: code (string, 87-55.2k chars) | code_codestyle (int64, 0-349)
# | style_context (string, 135-49.1k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ============================ [code_codestyle: 21] ============================
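# The sample above is the lazy-import `__init__.py` pattern used throughout
# `transformers`: optional-dependency guards populate `_import_structure`, and
# `_LazyModule` defers the actual submodule imports until first access. A
# minimal self-contained sketch of the same idea (the class and names below are
# invented for illustration, not part of the library):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes to their submodules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value
# ------------------------------------------------------------------------------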
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
# ================ [style_context_codestyle: 334 | label: 0] ================
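# `attribute_map` in the config above remaps standard attribute names onto the
# Transformer-XL ones, and `max_position_embeddings` is a property because the
# model has no fixed sequence-length limit. A quick sketch of the resulting
# behavior (assuming the `transformers` API; values reflect the defaults above):

from transformers import TransfoXLConfig

config = TransfoXLConfig()
print(config.hidden_size)              # 1024 -> remapped to `d_model`
print(config.num_hidden_layers)        # 18   -> remapped to `n_layer`
print(config.max_position_embeddings)  # -1   -> "no sequence length limit"
# ------------------------------------------------------------------------------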
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
# ============================ [code_codestyle: 22] ============================
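# The safety checker above reduces CLIP image embeddings to scalar scores via
# two linear heads and compares them against thresholds. A standalone sketch of
# that thresholding step (tensor values are made up for illustration):

import torch

scores = torch.tensor([[0.2], [0.7], [0.4]])  # e.g. p_head output, shape (batch, 1)
p_threshold = 0.5
flags = (scores.flatten() > p_threshold).tolist()
print(flags)  # [False, True, False] -> only the second image would be blacked out
# ------------------------------------------------------------------------------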
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
# ================ [style_context_codestyle: 334 | label: 0] ================
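# The tester above relies on BEiT's sequence-length arithmetic: a square grid of
# (image_size // patch_size) ** 2 patches plus one [CLS] token. A standalone
# check with the tester's defaults (image_size=30, patch_size=2):

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 1  # +1 for the [CLS] token
assert seq_length == 226
# ------------------------------------------------------------------------------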
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX Inference session with a given provider (default: CPUExecutionProvider)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
# ============================ [code_codestyle: 23] ============================
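# A usage sketch for the wrapper above. The revision-parsing logic from
# `from_pretrained` is runnable as-is; the hub calls are commented out because
# the model id below is a placeholder, not a real checkpoint:

model_id = "some-org/some-onnx-model@fp16"  # hypothetical id with an "@revision" suffix
revision = None
if len(model_id.split("@")) == 2:
    model_id, revision = model_id.split("@")
print(model_id, revision)  # some-org/some-onnx-model fp16

# model = OnnxRuntimeModel.from_pretrained(model_id)          # downloads model.onnx
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))  # kwargs become np arrays
# model.save_pretrained("./local-onnx-model")                 # copies weights (+ external data)
# ------------------------------------------------------------------------------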
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for vissl's RegNet, so that we can use `get_trunk_forward_outputs`.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), 
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
args = parser.parse_args()

pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# ================ [style_context_codestyle: 334 | label: 0] ================
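# The conversion above hinges on `Tracker`: forward hooks record every leaf
# module in execution order, so the source and destination networks can be
# zipped together weight by weight. A self-contained sketch of that idea:

import torch
import torch.nn as nn


def trace_leaves(module: nn.Module, x: torch.Tensor):
    traced, handles = [], []

    def hook(m, inputs, outputs):
        if len(list(m.modules())) == 1:  # a leaf module has no submodules
            traced.append(m)

    for m in module.modules():
        handles.append(m.register_forward_hook(hook))
    module(x)
    for h in handles:
        h.remove()
    return traced


leaves = trace_leaves(nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2)), torch.randn(1, 4))
print(leaves)  # [Linear(4, 8), ReLU(), Linear(8, 2)], in execution order
# ------------------------------------------------------------------------------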
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). See: https://arxiv.org/abs/1606.08415
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""
    Norm layer with adaptive layer norm zero (adaLN-Zero) conditioning.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""
    GroupNorm layer modified to incorporate timestep embeddings.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
# ============================ [code_codestyle: 24] ============================
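# The chunked feed-forward path in `BasicTransformerBlock` above splits the
# sequence dimension to bound peak activation memory; because the feed-forward
# is applied token-wise, chunking is numerically exact. A standalone check:

import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
hidden = torch.randn(2, 8, 16)  # (batch, seq, dim)

chunk_dim, chunk_size = 1, 4
num_chunks = hidden.shape[chunk_dim] // chunk_size
chunked = torch.cat([ff(h) for h in hidden.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)

assert torch.allclose(ff(hidden), chunked, atol=1e-6)
# ------------------------------------------------------------------------------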
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
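# New Code (illustrative) #
# A minimal sketch, not part of the original script, of what
# `find_executable_batch_size` does under the hood: it calls the wrapped
# function, and on a CUDA out-of-memory error it halves `batch_size` and
# retries. The real implementation lives in `accelerate.utils`; the version
# below is only an approximation for readers:
#
#     def find_executable_batch_size(starting_batch_size=128):
#         def decorator(function):
#             def wrapper(*args, **kwargs):
#                 batch_size = starting_batch_size
#                 while batch_size > 0:
#                     try:
#                         return function(batch_size, *args, **kwargs)
#                     except RuntimeError as e:  # OOM surfaces as RuntimeError
#                         if "out of memory" not in str(e):
#                             raise
#                         batch_size //= 2  # halve and retry
#                 raise RuntimeError("No executable batch size found.")
#             return wrapper
#         return decorator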
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator, batch_size = 16 ):
    """Build MRPC train/eval dataloaders with dynamic, hardware-friendly padding."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue', 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
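# --- illustrative sketch (added for clarity) ---
# collate_fn above pads each batch only to its longest sequence and, under
# mixed precision, rounds that length up to a multiple of 8 so the padded
# dimension stays tensor-core friendly. The same padding in isolation:
def _dynamic_padding_demo():
    tok = AutoTokenizer.from_pretrained('bert-base-cased' )
    features = [tok('short one' ), tok('a noticeably longer example sentence' )]
    batch = tok.pad(features , padding='longest' , pad_to_multiple_of=8 , return_tensors='pt' )
    assert batch['input_ids'].shape[1] % 8 == 0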
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config, args ):
    """Train and evaluate BERT on MRPC, auto-shrinking the batch size on OOM."""
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue', 'mrpc' )
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main( ):
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
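# --- illustrative sketch (added for clarity) ---
# What `find_executable_batch_size` automates, in miniature: run the wrapped
# function and, on an out-of-memory error, halve the batch size and retry.
# This is a simplified stand-in for illustration, not accelerate's actual code.
def _find_executable_batch_size_sketch(function, starting_batch_size=128 ):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size )
            except RuntimeError as e:
                if 'out of memory' not in str(e ).lower():
                    raise
                batch_size //= 2 # shrink and retry
        raise RuntimeError('No executable batch size found, reached zero.' )
    return wrapper
# usage: a loop decorated this way will, e.g., try 128, 64, 32, ... until one fits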
| 334 | 0 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel (DeeBertModel ):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''
    def __init__(self , config ):
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification (BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''
    def __init__(self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward(self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), entropy
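# --- illustrative sketch (added for clarity) ---
# At inference, DeeBERT-style models stop at the first internal classifier
# whose prediction entropy falls below a threshold. The decision rule over a
# stack of per-exit logits, in isolation; this assumes the `entropy` helper
# imported above computes per-example entropy over dim 1, and the 0.3
# threshold is an arbitrary example value.
def _early_exit_demo():
    import torch
    def exit_choice(per_layer_logits , threshold=0.3 ):
        for layer, logits in enumerate(per_layer_logits ):
            if entropy(logits ).item() < threshold:
                return layer, logits # confident enough: stop here
        return len(per_layer_logits ) - 1, per_layer_logits[-1]
    stack = [torch.tensor([[1.0, 1.0, 1.0, 1.0]] ), torch.tensor([[9.0, 0.0, 0.0, 0.0]] )]
    layer, logits = exit_choice(stack )
    assert layer == 1 # only the second, very confident exit clears the bar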
| 25 |
def reverse_long_words( sentence ):
    """Reverse every word longer than four characters, leaving shorter words as-is.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_snake_case = 16
_snake_case = 32
def bamb( x ):
    # Convert bytes to megabytes
    return int(x / 2**20 )
class TorchTracemalloc :
    """Context manager that records CUDA memory allocated, used and peaked inside the block."""
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders( accelerator,batch_size = 16,model_name_or_path = "bert-base-cased",n_train = 320,n_val = 160,):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset(
        """glue""","""mrpc""",split={"""train""": f'''train[:{n_train}]''', """validation""": f'''validation[:{n_val}]'''} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=True,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,batched=True,remove_columns=["""idx""", """sentence1""", """sentence2"""],load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""","""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples,padding="""max_length""",max_length=128,return_tensors="""pt""" )
        return tokenizer.pad(examples,padding="""longest""",return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""],shuffle=True,collate_fn=collate_fn,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""],shuffle=False,collate_fn=collate_fn,batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config,args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator,batch_size,model_name_or_path,args.n_train,args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path,return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(),lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,num_warmup_steps=0,num_training_steps=max_training_steps,)
    else:
        lr_scheduler = DummyScheduler(optimizer,total_num_steps=max_training_steps,warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model,optimizer,train_dataloader,eval_dataloader,lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch,num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
        accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
        accelerator.print(
            """Total Peak Memory consumed during the train (max): {}""".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'''epoch-{epoch}'''] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir,"""peak_memory_utilization.json""" ),"""w""" ) as f:
            json.dump(train_total_peak_memory,f )
def main( ):
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""",type=str,default="""bert-base-cased""",help="""Path to pretrained model or model identifier from huggingface.co/models.""",required=False,)
    parser.add_argument(
        """--output_dir""",type=str,default=""".""",help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""",)
    parser.add_argument(
        """--peak_memory_upper_bound""",type=float,default=None,help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""",)
    parser.add_argument(
        """--n_train""",type=int,default=320,help="""Number of training examples to use.""",)
    parser.add_argument(
        """--n_val""",type=int,default=160,help="""Number of validation examples to use.""",)
    parser.add_argument(
        """--num_epochs""",type=int,default=1,help="""Number of train epochs.""",)
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config,args )
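# --- illustrative usage sketch (added for clarity; needs a CUDA device) ---
# TorchTracemalloc above brackets a code region and reports the allocated
# delta and the peak, both in MB. In isolation:
def _trace_malloc_demo():
    if not torch.cuda.is_available():
        return
    with TorchTracemalloc() as tracemalloc:
        big = torch.empty(256 , 1024 , 1024 , device='cuda' ) # ~1 GiB of fp32
        del big
    print(f'used {tracemalloc.used} MB, peaked {tracemalloc.peaked} MB' )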
if __name__ == "__main__":
main()
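# --- illustrative sketch (added for clarity; the function name is hypothetical,
# but the file name matches what the script above writes) ---
# The script records one peak-memory figure per epoch in JSON; reading it back:
def _read_peak_memory_report(output_dir='.' ):
    with open(os.path.join(output_dir , 'peak_memory_utilization.json' ) ) as f:
        report = json.load(f )
    # e.g. {"epoch-0": 1234} -> largest peak across epochs, in MB
    return max(report.values() )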
| 26 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro ( TestCasePlus ):
"""simple docstring"""
    def setUp( self ):
super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=True ,)
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download( self ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher ( TestCasePlus ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script( self ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU must improve over training
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
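# --- illustrative sketch (added for clarity) ---
# Both tests above drive an argparse-based entry point by patching sys.argv,
# which is the standard way to exercise a CLI script in-process. The bare
# pattern, with a hypothetical flag:
def _argv_patch_demo():
    testargs = ['prog.py', '--learning_rate', '3e-4']
    with patch.object(sys , 'argv' , testargs ):
        parser = argparse.ArgumentParser()
        parser.add_argument('--learning_rate' , type=float )
        args = parser.parse_args() # reads the patched sys.argv
    assert args.learning_rate == 3e-4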
| 334 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
'''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        '''simple docstring'''
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device ),
            'mask_labels': torch.randn((2, 10, *size) , device=torch_device ),
            'class_labels': torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
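# --- illustrative sketch (added for clarity) ---
# The gradient-flow test above relies on Tensor.retain_grad(), which makes a
# non-leaf intermediate keep its .grad after backward. In isolation:
def _retain_grad_demo():
    x = torch.randn(3 , requires_grad=True )
    hidden = x * 2 # non-leaf: its .grad would normally be discarded
    hidden.retain_grad()
    hidden.sum().backward()
    assert hidden.grad is not None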
TOLERANCE = 1E-4
def prepare_img ():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def model_checkpoints( self ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__a )
__a : List[Any] = self.default_image_processor
__a : Dict = prepare_img()
__a : Union[str, Any] = image_processor(__a , return_tensors='pt' ).to(__a )
__a : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__a , (1, 3, 384, 384) )
with torch.no_grad():
__a : List[str] = model(**__a )
__a : str = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__a : Optional[int] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__a : Dict = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __a , atol=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__a ).eval()
__a : List[Any] = self.default_image_processor
__a : Optional[Any] = prepare_img()
__a : Optional[int] = image_processor(__a , return_tensors='pt' ).to(__a )
__a : Optional[int] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__a , (1, 3, 384, 384) )
with torch.no_grad():
__a : Union[str, Any] = model(**__a )
# masks_queries_logits
__a : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__a : List[str] = torch.tensor(__a ).to(__a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __a , atol=__a ) )
# class_queries_logits
__a : Optional[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__a : Tuple = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__a ).eval()
__a : Optional[int] = self.default_image_processor
__a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__a : Any = inputs['pixel_values'].to(__a )
__a : int = [el.to(__a ) for el in inputs['mask_labels']]
__a : List[Any] = [el.to(__a ) for el in inputs['class_labels']]
with torch.no_grad():
__a : Union[str, Any] = model(**__a )
self.assertTrue(outputs.loss is not None )
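# --- illustrative sketch (added for clarity) ---
# The integration tests above compare a small slice of each output against a
# hard-coded reference tensor with an absolute tolerance. The bare pattern:
def _allclose_slice_demo():
    actual = torch.zeros(1 , 10 , 384 , 384 )
    expected_slice = torch.zeros(3 , 3 ) # would be copied from a trusted run
    assert torch.allclose(actual[0 , 0 , :3 , :3] , expected_slice , atol=TOLERANCE )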
| 27 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'blip_2_vision_model'
    def __init__( self ,hidden_size=1408 ,intermediate_size=6144 ,num_hidden_layers=39 ,num_attention_heads=16 ,image_size=224 ,patch_size=14 ,hidden_act="gelu" ,layer_norm_eps=0.00_001 ,attention_dropout=0.0 ,initializer_range=1e-10 ,qkv_bias=True ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size =hidden_size
        self.intermediate_size =intermediate_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.patch_size =patch_size
        self.image_size =image_size
        self.initializer_range =initializer_range
        self.attention_dropout =attention_dropout
        self.layer_norm_eps =layer_norm_eps
        self.hidden_act =hidden_act
        self.qkv_bias =qkv_bias
    @classmethod
    def from_pretrained ( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs =cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict =config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'blip_2_qformer'
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,cross_attention_frequency=2 ,encoder_hidden_size=1408 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.hidden_act =hidden_act
        self.intermediate_size =intermediate_size
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.initializer_range =initializer_range
        self.layer_norm_eps =layer_norm_eps
        self.position_embedding_type =position_embedding_type
        self.cross_attention_frequency =cross_attention_frequency
        self.encoder_hidden_size =encoder_hidden_size
    @classmethod
    def from_pretrained ( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs =cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict =config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'blip-2'
    is_composition = True
    def __init__( self ,vision_config=None ,qformer_config=None ,text_config=None ,num_query_tokens=32 ,**kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config ={}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config ={}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config ={}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config =BlipaVisionConfig(**vision_config )
        self.qformer_config =BlipaQFormerConfig(**qformer_config )
        text_model_type =text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config =CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings =self.text_config.tie_word_embeddings
        self.is_encoder_decoder =self.text_config.is_encoder_decoder
        self.num_query_tokens =num_query_tokens
        self.qformer_config.encoder_hidden_size =self.vision_config.hidden_size
        self.use_decoder_only_language_model =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor =1.0
        self.initializer_range =0.02
    @classmethod
    def from_vision_qformer_text_configs ( cls ,vision_config: BlipaVisionConfig ,qformer_config: BlipaQFormerConfig ,text_config: PretrainedConfig ,**kwargs ,):
        return cls(
            vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
    def to_dict ( self ):
        output =copy.deepcopy(self.__dict__ )
        output['vision_config'] =self.vision_config.to_dict()
        output['qformer_config'] =self.qformer_config.to_dict()
        output['text_config'] =self.text_config.to_dict()
        output['model_type'] =self.__class__.model_type
        return output
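# --- illustrative sketch (added for clarity) ---
# The composite config above can be assembled from its three sub-configs via
# the classmethod, mirroring how transformers builds the BLIP-2 config:
def _compose_config_demo():
    vision_config = BlipaVisionConfig()
    qformer_config = BlipaQFormerConfig()
    text_config = CONFIG_MAPPING['opt']() # default OPT text backbone
    config = BlipaConfig.from_vision_qformer_text_configs(vision_config , qformer_config , text_config )
    assert config.num_query_tokens == 32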
| 334 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCamelCase ( matrix ) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix, computed with Decimal precision."""
    d = Decimal
    # The 2x2 branch uses the closed-form inverse: swap the diagonal,
    # negate the off-diagonal, and divide by the determinant
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
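# --- illustrative check (added for clarity) ---
# A quick sanity check of the adjugate formula: for [[2, 5], [3, 7]] the
# determinant is 2*7 - 5*3 = -1, so the inverse is [[-7, 5], [3, -2]], and
# multiplying the two back together yields the identity.
def _inverse_demo():
    m = [[2.0, 5.0], [3.0, 7.0]]
    inv = __lowerCamelCase(m )
    product = [[sum(m[i][k] * inv[k][j] for k in range(2 ) ) for j in range(2 )] for i in range(2 )]
    assert product == [[1.0, 0.0], [0.0, 1.0]]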
| 28 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute ( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=1024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        out =compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
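# --- illustrative usage sketch (added for clarity; the first call downloads a
# GPT-2 featurizer, so this is not a cheap operation). Loading and calling the
# metric follows the standard datasets API, as in the docstring above:
def _mauve_demo():
    mauve = datasets.load_metric('mauve' )
    out = mauve.compute(predictions=['hello there'] , references=['general kenobi'] )
    return out.mauve # a scalar in (0, 1]; higher means the distributions are closer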
| 334 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
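# --- illustrative sketch (added for clarity) ---
# The tester below builds its inputs from ids_tensor / random_attention_mask,
# the common-test helpers that draw random token ids and 0/1 masks of a given
# shape. Their effect in isolation:
def _dummy_inputs_demo():
    input_ids = ids_tensor([2, 7] , vocab_size=99 ) # random ints in [0, 99)
    attention_mask = random_attention_mask([2, 7] )
    assert input_ids.shape == (2, 7) and attention_mask.max() <= 1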
class MegatronBertModelTester :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=6_4 , _UpperCamelCase=3_2 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : str = num_choices
UpperCAmelCase_ : Optional[Any] = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ) -> Optional[Any]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Dict = MegatronBertModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Dict = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = MegatronBertForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = MegatronBertForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = MegatronBertForNextSentencePrediction(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = MegatronBertForPreTraining(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : List[str] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , next_sentence_label=_UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Optional[int] = MegatronBertForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Dict = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = MegatronBertForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Any = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Optional[Any] = MegatronBertForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : str = MegatronBertForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Tuple = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Any:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Tuple:
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
    def test_config( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    def test_megatron_bert_model( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests (unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('Model is not available.' )
    def test_inference_no_head( self ) -> Dict:
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_0_2_4) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 29 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'vit_mae'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
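if __name__ == "__main__":
    # Editor's illustration (not part of the original config file): with the
    # defaults above, a 224x224 image split into 16x16 patches yields
    # (224 // 16) ** 2 == 196 patches, of which mask_ratio=0.75 hides 147
    # during MAE pretraining.
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches , int(config.mask_ratio * num_patches ) )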
| 334 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError('''Quantized models are not supported.''' )
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = '''background'''
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
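def _demo_parse_model_name() -> None:
    # Editor's illustration (hypothetical helper, not part of the original
    # script): shows how the checkpoint-name regex in get_mobilenet_va_config
    # splits a model name into a depth multiplier and an input resolution.
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , 'mobilenet_v1_0.75_192' )
    assert matches is not None
    assert float(matches[1] ) == 0.75 and int(matches[2] ) == 192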
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetVaImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        repo_id = '''google/''' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
    help='Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
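# Editor's note — example invocation (illustrative only; the .ckpt path is
# hypothetical and must point at a downloaded TensorFlow MobileNetV1 checkpoint,
# and `<this_script>` stands for whatever filename this script is saved under):
#   python <this_script>.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf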
| 30 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
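# Editor's note (illustration, not part of the module): with the _LazyModule
# pattern above, the heavy `modeling_ernie` import only runs when one of its
# exported names is first accessed, e.g.:
#   import transformers
#   transformers.ErnieModel  # first attribute access triggers the submodule import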
| 334 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] ):
_UpperCAmelCase : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
_UpperCAmelCase : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(A )
def _A ( self : int ):
_UpperCAmelCase : int = logging.get_verbosity()
_UpperCAmelCase : int = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : List[str] = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(A )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def _A ( self : Dict ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_UpperCAmelCase : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : Optional[int] = os.getenv("TRANSFORMERS_VERBOSITY" , A )
_UpperCAmelCase : Any = logging.log_levels[env_level_str]
_UpperCAmelCase : Tuple = logging.get_verbosity()
self.assertEqual(
A , A , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_UpperCAmelCase : int = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def _A ( self : Union[str, Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_UpperCAmelCase : List[Any] = logging.logging.getLogger()
with CaptureLogger(A ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def _A ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_UpperCAmelCase : Optional[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : List[Any] = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(A ) as cl:
logger.warning_advice(A )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(A ) as cl:
logger.warning_advice(A )
self.assertEqual(cl.out , msg + "\n" )
def UpperCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 31 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output( output, expected_num_chunks ):
    """simple docstring"""
    stream =pa.BufferReader(output ) if isinstance(output, pa.Buffer ) else pa.memory_map(output )
    f =pa.ipc.open_stream(stream )
    pa_table =f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def get_base_dtype( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list( lst, value ):
    """simple docstring"""
    if isinstance(lst[0], list ):
        change_first_primitive_element_in_list(lst[0], value )
    else:
        lst[0] =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class SeqaSeqTrainingArguments( TrainingArguments ):
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    sortish_sampler : bool = field(default=False , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
    predict_with_generate : bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    adafactor : bool = field(default=False , metadata={'''help''': '''whether to use adafactor'''} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    dropout : Optional[float] = field(default=None , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    lr_scheduler : Optional[str] = field(
        default='''linear''' , metadata={'''help''': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
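if __name__ == "__main__":
    # Editor's usage sketch (not part of the original file; the output directory
    # is a made-up path): `output_dir` is the only required base
    # TrainingArguments field, everything else falls back to the defaults above.
    args = SeqaSeqTrainingArguments(output_dir='''/tmp/seq2seq_demo''' , predict_with_generate=True , label_smoothing=0.1 )
    print(args.sortish_sampler , args.lr_scheduler )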
| 32 |
def or_gate( input_a, input_b ):
    """simple docstring"""
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ):
    """simple docstring"""
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
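def _print_truth_table() -> None:
    # Editor's illustration (added helper, not part of the original file):
    # print the OR gate over every input combination as a small truth table.
    print('a b | a OR b' )
    for a in (0, 1):
        for b in (0, 1):
            print(f'{a} {b} | {or_gate(a, b )}' )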
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value : int ) -> None:
        self.value = value
        self.left : Node | None = None
        self.right : Node | None = None
class BinaryTreeNodeSum :
    def __init__( self , tree : Node ) -> None:
        self.tree = tree
    def depth_first_search( self , node : Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
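def _demo_tree_sum() -> None:
    # Editor's usage sketch (not part of the original file): sum the values of
    # a three-node tree with the classes above; 10 + 5 + (-3) == 12.
    root = Node(10 )
    root.left = Node(5 )
    root.right = Node(-3 )
    assert next(iter(BinaryTreeNodeSum(root ) ) ) == 12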
| 33 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
_lowerCamelCase ={
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase ={
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file( vocab_file ):
    """simple docstring"""
    with open(vocab_file, 'r' ) as f:
        lines =f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self ,vocab_file ,unk_token="<unk>" ,cls_token="<cls>" ,pad_token="<pad>" ,mask_token="<mask>" ,eos_token="<eos>" ,**kwargs ,):
        super().__init__(**kwargs )
        self.all_tokens =load_vocab_file(vocab_file )
        self._id_to_token =dict(enumerate(self.all_tokens ) )
        self._token_to_id ={tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token =unk_token
        self.cls_token =cls_token
        self.pad_token =pad_token
        self.mask_token =mask_token
        self.eos_token =eos_token
        self.unique_no_split_tokens =self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self ,index : int ):
        return self._id_to_token.get(index ,self.unk_token )
    def _convert_token_to_id( self ,token : str ):
        return self._token_to_id.get(token ,self._token_to_id.get(self.unk_token ) )
    def _tokenize( self ,text ,**kwargs ):
        return text.split()
    def get_vocab_size( self ,with_added_tokens=False ):
        return len(self._id_to_token )
    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self ,token : str ):
        return self._token_to_id.get(token ,self._token_to_id.get(self.unk_token ) )
    def id_to_token( self ,index : int ):
        return self._id_to_token.get(index ,self.unk_token )
    def build_inputs_with_special_tokens( self ,token_ids_a : List[int] ,token_ids_b : Optional[List[int]] = None ):
        cls =[self.cls_token_id]
        sep =[self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self ,token_ids_a : List ,token_ids_b : Optional[List] = None ,already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask =[1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask
    def save_vocabulary( self ,save_directory ,filename_prefix=None ):
        vocab_file =os.path.join(save_directory ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(vocab_file ,'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self ,new_tokens : Union[List[str], List[AddedToken]] ,special_tokens : bool = False ):
        return super()._add_tokens(new_tokens ,special_tokens=special_tokens )
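if __name__ == "__main__":
    # Editor's usage sketch (illustrative only; the toy vocabulary below is made
    # up, not the real ESM-2 vocab file): the vocab format is one token per
    # line, so a tokenizer can be built from a local file without a checkpoint.
    import tempfile
    with tempfile.NamedTemporaryFile('w' , suffix='.txt' , delete=False ) as tmp:
        tmp.write('\n'.join(['<cls>', '<pad>', '<eos>', '<unk>', 'L', 'A', 'G', '<mask>'] ) )
    tok = EsmTokenizer(vocab_file=tmp.name )
    print(tok('L A G' )['input_ids'] )  # expected: [0, 4, 5, 6, 2] -> <cls> L A G <eos>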
| 334 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
A =[
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A =logging.getLogger()
def snake_case_ ():
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase = parser.parse_args()
return args.f
def get_results (output_dir : List[str] , split : Union[str, Any]="eval" ):
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_flax_glue.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_clm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_summarization_flax.main()
UpperCAmelCase = get_results(lowercase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_mlm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_ta_mlm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_flax_ner.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_qa.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker :
    """simple docstring"""
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=lowerCamelCase_ )
    handles : list = field(default_factory=lowerCamelCase_ )
    def _forward_hook( self ,m ,inputs : Tensor ,outputs : Tensor ):
        has_not_submodules =len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Convad ) or isinstance(m ,nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : int ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class ModuleTransfer :
    """simple docstring"""
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self ,x : Tensor ):
        dest_traced =Tracker(self.dest )(x ).parametrized
        src_traced =Tracker(self.src )(x ).parametrized
        src_traced =list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced =list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
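def _demo_module_transfer() -> None:
    # Editor's illustration (toy modules, not part of the conversion script):
    # ModuleTransfer copies weights between two architecturally aligned networks
    # by tracing the leaf modules each one executes on the same input.
    src = nn.Sequential(nn.Conv2d(3 , 8 , 3 ) , nn.BatchNorm2d(8 ) )
    dest = nn.Sequential(nn.Conv2d(3 , 8 , 3 ) , nn.BatchNorm2d(8 ) )
    ModuleTransfer(src=src , dest=dest )(torch.randn(1 , 3 , 16 , 16 ) )
    assert torch.allclose(src[0].weight , dest[0].weight )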
def convert_weight_and_push( name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model =timm.create_model(name, pretrained=True ).eval()
        our_model =ResNetForImageClassification(config ).eval()
        module_transfer =ModuleTransfer(src=from_model, dest=our_model )
        x =torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name =F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename ='imagenet-1k-id2label.json'
    num_labels =1000
    expected_shape =(1, num_labels)
    repo_id ='huggingface/label-files'
    idalabel =json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    idalabel ={int(k ): v for k, v in idalabel.items()}
    labelaid ={v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig =partial(ResNetConfig, num_labels=num_labels, id2label=idalabel, label2id=labelaid )
    names_to_config ={
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
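# --- Illustrative sketch (added): the transfer above works because timm's
# ResNet and the HF ResNet apply the same learnable operations in the same
# order, so their state dicts can be copied pairwise. A simplified standalone
# version of that idea (it pairs leaf modules in registration order, a rougher
# heuristic than the execution-order Tracker used above):
def _transfer_weights_sketch(src, dest):
    def leaf_params(module):
        # leaf submodules that actually hold learnable parameters
        return [m for m in module.modules() if len(m.state_dict()) > 0 and not list(m.children())]

    src_mods, dest_mods = leaf_params(src), leaf_params(dest)
    if len(src_mods) != len(dest_mods):
        raise ValueError("source and destination must expose the same number of operations")
    for s, d in zip(src_mods, dest_mods):
        d.load_state_dict(s.state_dict())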
| 334 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "xlm"
lowercase = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : Tuple , snake_case_ : Optional[int]=30_145 , snake_case_ : Optional[Any]=2_048 , snake_case_ : Optional[Any]=12 , snake_case_ : Dict=16 , snake_case_ : Optional[Any]=0.1 , snake_case_ : str=0.1 , snake_case_ : Tuple=True , snake_case_ : Union[str, Any]=False , snake_case_ : Dict=False , snake_case_ : Optional[int]=False , snake_case_ : Dict=1 , snake_case_ : List[Any]=True , snake_case_ : Tuple=512 , snake_case_ : List[str]=2_048**-0.5 , snake_case_ : List[Any]=1E-1_2 , snake_case_ : int=0.02 , snake_case_ : List[str]=0 , snake_case_ : Optional[int]=1 , snake_case_ : List[Any]=2 , snake_case_ : str=3 , snake_case_ : Union[str, Any]=5 , snake_case_ : List[str]=True , snake_case_ : List[str]="first" , snake_case_ : List[Any]=True , snake_case_ : List[Any]=None , snake_case_ : List[str]=True , snake_case_ : Dict=0.1 , snake_case_ : Any=5 , snake_case_ : Optional[Any]=5 , snake_case_ : List[Any]=0 , snake_case_ : List[str]=0 , snake_case_ : str=2 , snake_case_ : Tuple=0 , **snake_case_ : List[Any] , ):
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = emb_dim
snake_case__ : int = n_layers
snake_case__ : Dict = n_heads
snake_case__ : Union[str, Any] = dropout
snake_case__ : List[str] = attention_dropout
snake_case__ : Optional[int] = gelu_activation
snake_case__ : Union[str, Any] = sinusoidal_embeddings
snake_case__ : Union[str, Any] = causal
snake_case__ : Tuple = asm
snake_case__ : int = n_langs
snake_case__ : int = use_lang_emb
snake_case__ : List[Any] = layer_norm_eps
snake_case__ : Union[str, Any] = bos_index
snake_case__ : int = eos_index
snake_case__ : str = pad_index
snake_case__ : str = unk_index
snake_case__ : Tuple = mask_index
snake_case__ : Optional[int] = is_encoder
snake_case__ : int = max_position_embeddings
snake_case__ : List[Any] = embed_init_std
snake_case__ : List[str] = init_std
snake_case__ : Any = summary_type
snake_case__ : Tuple = summary_use_proj
snake_case__ : int = summary_activation
snake_case__ : Optional[int] = summary_proj_to_labels
snake_case__ : Optional[int] = summary_first_dropout
snake_case__ : str = start_n_top
snake_case__ : Union[str, Any] = end_n_top
snake_case__ : List[Any] = mask_token_id
snake_case__ : Optional[Any] = lang_id
if "n_words" in kwargs:
snake_case__ : Any = kwargs["""n_words"""]
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , **snake_case_ )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
@property
def lowerCamelCase ( self : Dict ):
if self.task == "multiple-choice":
snake_case__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case__ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
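# --- Illustration (added; assumes the configuration class above is the one
# published as `transformers.XLMConfig`). The `attribute_map` makes canonical
# names plain aliases for XLM's own names, for both reads and writes:
if __name__ == "__main__":
    from transformers import XLMConfig

    demo_config = XLMConfig(emb_dim=1024)
    assert demo_config.hidden_size == demo_config.emb_dim == 1024  # read redirected
    demo_config.num_hidden_layers = 6  # write redirected to `n_layers`
    assert demo_config.n_layers == 6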
| 35 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_=7 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =None
if token is not None:
SCREAMING_SNAKE_CASE ={'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE ='636036'
SCREAMING_SNAKE_CASE =F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
SCREAMING_SNAKE_CASE =requests.get(lowerCAmelCase_, headers=lowerCAmelCase_ ).json()
return result["workflow_runs"]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =get_daily_ci_runs(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE =workflow_run['id']
break
return workflow_run_id
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =get_last_daily_ci_runs(lowerCAmelCase_ )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE =get_artifacts_links(worflow_run_id=lowerCAmelCase_, token=lowerCAmelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE =artifacts_links[artifact_name]
download_artifact(
artifact_name=lowerCAmelCase_, artifact_url=lowerCAmelCase_, output_dir=lowerCAmelCase_, token=lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
get_last_daily_ci_artifacts(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE ={}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, F'{artifact_name}.zip' )
if os.path.isfile(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE ={}
with zipfile.ZipFile(lowerCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase_ ):
# read the file
with z.open(lowerCAmelCase_ ) as f:
SCREAMING_SNAKE_CASE =f.read().decode('UTF-8' )
return results
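# --- Standalone sketch (added): the Actions REST call the helpers above wrap,
# using the documented `workflow runs` endpoint. Unauthenticated requests work
# but are rate-limited; set GITHUB_TOKEN to authenticate.
if __name__ == "__main__":
    demo_headers = {"Accept": "application/vnd.github+json"}
    if os.environ.get("GITHUB_TOKEN"):
        demo_headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}"
    demo_url = "https://api.github.com/repos/huggingface/transformers/actions/workflows/636036/runs"
    demo_response = requests.get(demo_url, params={"branch": "main", "per_page": 3}, headers=demo_headers)
    for demo_run in demo_response.json().get("workflow_runs", []):
        print(demo_run["id"], demo_run["status"], demo_run["created_at"])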
| 334 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : int = 8
# DPR tok
_lowerCAmelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : str = os.path.join(__a, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
_lowerCAmelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCAmelCase : Optional[int] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCAmelCase : Any = {"unk_token": "<unk>"}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : str = os.path.join(__a, BART_VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Tuple = os.path.join(__a, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
def snake_case__ ( self):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = os.path.join(self.tmpdirname, "rag_tokenizer")
_lowerCAmelCase : str = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
_lowerCAmelCase : Dict = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(__a)
rag_tokenizer.save_pretrained(__a)
_lowerCAmelCase : Optional[int] = RagTokenizer.from_pretrained(__a, config=__a)
self.assertIsInstance(new_rag_tokenizer.question_encoder, __a)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, __a)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq")
_lowerCAmelCase : int = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_lowerCAmelCase : Tuple = tokenizer(__a)
self.assertIsNotNone(__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
_lowerCAmelCase : Optional[Any] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_lowerCAmelCase : Dict = tokenizer(__a)
self.assertIsNotNone(__a)
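# --- Illustration (added): outside the test harness, the composite tokenizer
# is used like any other tokenizer. This needs network access plus the
# torch/datasets/faiss extras guarded above; __call__ tokenizes with the DPR
# question-encoder side:
if __name__ == "__main__":
    demo_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    demo_batch = demo_tokenizer(["who wrote hamlet"])
    print(demo_batch["input_ids"])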
| 36 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
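# --- Sketch (added): the grouped-projection knobs exercised above can be
# checked on a randomly initialised tiny model. Sizes are chosen so every
# group count divides its channel dimension, and embedding_size is set equal
# to hidden_size as SqueezeBert requires.
if __name__ == "__main__":
    tiny_config = SqueezeBertConfig(
        vocab_size=99,
        embedding_size=32,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=4,
    )
    tiny_model = SqueezeBertModel(tiny_config).eval()
    with torch.no_grad():
        out = tiny_model(torch.randint(0, 99, (1, 8)))
    print(out.last_hidden_state.shape)  # torch.Size([1, 8, 32])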
| 334 | 0 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
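# --- Usage sketch (added). Positional order is (voltage, current, resistance);
# whichever quantity is passed as 0 is solved for:
if __name__ == "__main__":
    print(_SCREAMING_SNAKE_CASE(0, 2, 5))   # {'voltage': 10.0}
    print(_SCREAMING_SNAKE_CASE(10, 0, 5))  # {'current': 2.0}
    print(_SCREAMING_SNAKE_CASE(10, 2, 0))  # {'resistance': 5.0}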
| 37 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
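# --- Concrete sketch (added): the padding contract tested above, shown with
# Wav2Vec2FeatureExtractor purely as an example implementation.
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    demo_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    demo_batch = BatchFeature({"input_values": [[0.1] * 3, [0.2] * 5]})
    demo_padded = demo_extractor.pad(demo_batch, padding="longest", return_tensors="np")
    print(demo_padded["input_values"].shape)  # (2, 5) -- shorter sample padded with 0.0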
| 334 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
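# --- Illustration (added): the point of `_LazyModule` is that heavy submodules
# are imported only on first attribute access, keeping `import transformers`
# cheap:
if __name__ == "__main__":
    import transformers.models.xlm as xlm_module  # cheap: only the lazy shim loads

    print(xlm_module.XLMConfig)  # this attribute access triggers the real import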
| 38 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase_ ))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =random.randint(0, len(lowerCAmelCase_ ) - 1 )
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =list(lowerCAmelCase_ )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE =random.choice(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =[]
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE =int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE =10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =population_score[random.randint(0, lowerCAmelCase_ )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =crossover(parent_a[0], lowerCAmelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
return pop
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE =F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCAmelCase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE =F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCAmelCase_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE =[]
for _ in range(lowerCAmelCase_ ):
population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE =[evaluate(lowerCAmelCase_, lowerCAmelCase_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE =sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : lowerCAmelCase_[1], reverse=lowerCAmelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE =[
(item, score / len(lowerCAmelCase_ )) for item, score in population_score
]
# This is selection
for i in range(lowerCAmelCase_ ):
population.extend(select(population_score[int(lowerCAmelCase_ )], lowerCAmelCase_, lowerCAmelCase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowerCAmelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
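# --- Standalone sketch (added): the single-point crossover used above, written
# out with explicit names on a concrete pair of parents (this is the intended
# behaviour: one cut point, tails swapped between the two parents):
if __name__ == "__main__":
    def _crossover_sketch(parent_a: str, parent_b: str) -> tuple[str, str]:
        cut = random.randint(0, len(parent_a) - 1)  # one cut point shared by both
        return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]

    print(_crossover_sketch("AAAAAA", "BBBBBB"))  # e.g. ('AABBBB', 'BBAAAA')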
| 334 | 0 |
import os
def __A ( __lowerCAmelCase = "input.txt" )-> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file:
_UpperCAmelCase = [
            [int(element ) for element in line.split(',' )]
for line in input_file.readlines()
]
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = len(matrix[0] )
_UpperCAmelCase = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = matrix[i][0]
for j in range(1 , __lowerCAmelCase ):
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCAmelCase ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
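# --- Standalone sketch (added): the same three-pass relaxation on an in-memory
# matrix (moves allowed: right, up, down; start anywhere in the first column,
# finish anywhere in the last):
if __name__ == "__main__":
    demo_matrix = [[131, 673, 234], [201, 96, 342], [630, 803, 746]]
    best = [row[0] for row in demo_matrix]
    for j in range(1, len(demo_matrix[0])):
        column = [best[i] + demo_matrix[i][j] for i in range(len(demo_matrix))]  # step right
        for i in range(1, len(demo_matrix)):              # relax moving down
            column[i] = min(column[i], column[i - 1] + demo_matrix[i][j])
        for i in range(len(demo_matrix) - 2, -1, -1):     # relax moving up
            column[i] = min(column[i], column[i + 1] + demo_matrix[i][j])
        best = column
    print(min(best))  # 639 (201 -> 96 -> 342)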
| 39 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024; the tiny random gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    """simple docstring"""

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
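
# Hedged usage sketch (assumes `Trie` here is transformers' tokenization Trie,
# i.e. the class from `transformers.tokenization_utils`, imported above):
# added tokens split raw text greedily, longest match first.
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("extra_id_100")
    print(demo_trie.split("[CLS] This is a extra_id_100"))
    # -> ['[CLS]', ' This is a ', 'extra_id_100']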
| 334 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta"

    def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
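
# Hedged usage sketch: build a default config and inspect the ONNX input axes.
# `RobertaOnnxConfig(config)` uses its default task ("default"), so the dynamic
# axes printed below are {0: "batch", 1: "sequence"} for both inputs.
if __name__ == "__main__":
    demo_config = RobertaConfig()
    demo_onnx_config = RobertaOnnxConfig(demo_config)
    print(demo_onnx_config.inputs)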
| 40 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
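
# Hedged example invocation (paths and the saved module name are hypothetical;
# the checkpoint file name must be one of ACCEPTABLE_CHECKPOINTS above):
#   python convert_visual_bert_checkpoint.py /path/to/nlvr2_fine_tuned.th ./visual_bert_nlvr2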
| 334 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
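
# Hedged example invocation (paths and the saved module name are hypothetical;
# --original_ckpt may also be an https:// URL, in which case the state dict is
# fetched via torch.hub):
#   python convert_swiftformer_checkpoint.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt /path/to/swiftformer_xs.pth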
| 41 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
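
# Hedged usage sketch: the attribute_map above aliases `hidden_size` to
# `d_model` and `num_attention_heads` to `encoder_attention_heads`, so the
# aliased reads below resolve through PretrainedConfig's attribute mapping.
if __name__ == "__main__":
    demo_config = NllbMoeConfig(d_model=256, num_experts=8)
    print(demo_config.hidden_size, demo_config.num_experts)  # 256 8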
| 334 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
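
# Hedged note: these tests are meant to be driven by pytest from a transformers
# checkout; the path below is the conventional location and is an assumption.
#   pytest tests/models/albert/test_modeling_albert.py -k "test_model" -x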
| 42 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
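
# Hedged worked example: for [1.0, 3.0] and [2.0], the merged sorted list is
# [1.0, 2.0, 3.0] (odd length), so the median is the middle element, 2.0; for
# an even-length merge the function averages the two middle elements.
#   >>> median_of_two_arrays([1.0, 3.0], [2.0])
#   2.0
#   >>> median_of_two_arrays([1.0, 3.0], [2.0, 4.0])
#   2.5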
| 334 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__( self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True, ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1( self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe1( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2( self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe2( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3( self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe3( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4( self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe4( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def __call__( self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
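
# Hedged usage sketch (downloads four full v1.x checkpoints, so it is heavy;
# the custom_pipeline name below assumes this file is registered as a diffusers
# community pipeline under that name):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   out = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   images = out.images  # one image per checkpoint, v1.1 through v1.4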
| 43 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
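
# Hedged usage sketch: Transformer-XL has no fixed sequence-length limit, so
# the read-only `max_position_embeddings` property above logs a note and
# returns -1 rather than a real limit.
if __name__ == "__main__":
    demo_config = TransfoXLConfig()
    print(demo_config.max_position_embeddings)  # -1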
| 334 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
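
# Hedged usage sketch (network access required to fetch the hosted files named
# in the maps above):
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("hello world", "second segment")
#   print(enc["input_ids"], enc["token_type_ids"])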
| 44 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""

    def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
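        # 196 = (224 // 16) ** 2, i.e. one mask flag per 16x16 patch of the 224x224 input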
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
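# Illustrative sketch (not from the original tests) of the post-processing exercised above:
# `post_process_semantic_segmentation` resizes the logits and takes an argmax, returning one
# (height, width) tensor of class ids per image. The variable names below are hypothetical.
#
#     seg_maps = image_processor.post_process_semantic_segmentation(
#         outputs=outputs, target_sizes=[image.size[::-1]] )  # PIL size is (width, height)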
| 334 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __UpperCAmelCase ( self , labels ):
        if isinstance(labels , str ):
            __a = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(sequences ) == 0 or len(labels ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template ) )

        if isinstance(sequences , str ):
            __a = [sequences]
        __a = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
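    # e.g. one sequence with labels ["politics", "sports"] and the default template
    # "This example is {}." yields the premise/hypothesis pairs
    # [[sequence, "This example is politics."], [sequence, "This example is sports."]]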
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , _a=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        __a = _a
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def __UpperCAmelCase ( self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
    def __UpperCAmelCase ( self , _a , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
__a = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
__a = self.tokenizer.eos_token
try:
            __a = self.tokenizer(
                _a , add_special_tokens=add_special_tokens , return_tensors=self.framework , padding=padding , truncation=truncation , )
except Exception as e:
if "too short" in str(_a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
                __a = self.tokenizer(
                    _a , add_special_tokens=add_special_tokens , return_tensors=self.framework , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
    def __UpperCAmelCase ( self , **kwargs ):
        if kwargs.get('''multi_class''' , None ) is not None:
__a = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
__a = {}
if "candidate_labels" in kwargs:
__a = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
__a = {}
if "multi_label" in kwargs:
__a = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
    def __call__( self , _a , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            __a = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(_a , **kwargs )
    def __UpperCAmelCase ( self , _a , candidate_labels=None , hypothesis_template="This example is {}." ):
        __a , __a = self._args_parser(_a , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            __a = self._parse_and_tokenize([sequence_pair] )

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
def __UpperCAmelCase ( self , _a ):
__a = inputs['''candidate_label''']
__a = inputs['''sequence''']
__a = {k: inputs[k] for k in self.tokenizer.model_input_names}
__a = self.model(**_a )
__a = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
    def __UpperCAmelCase ( self , model_outputs , multi_label=False ):
__a = [outputs['''candidate_label'''] for outputs in model_outputs]
__a = [outputs['''sequence'''] for outputs in model_outputs]
__a = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
__a = logits.shape[0]
        __a = len(model_outputs )
__a = N // n
__a = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(model_outputs ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__a = self.entailment_id
__a = -1 if entailment_id == 0 else 0
__a = reshaped_outputs[..., [contradiction_id, entailment_id]]
__a = np.exp(_a ) / np.exp(_a ).sum(-1 , keepdims=_a )
__a = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__a = reshaped_outputs[..., self.entailment_id]
__a = np.exp(_a ) / np.exp(_a ).sum(-1 , keepdims=_a )
__a = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
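# Illustrative numpy sketch (not part of the pipeline) of the two scoring modes in
# `postprocess` above, assuming `reshaped_outputs` has shape
# (num_sequences, num_labels, num_classes):
#
#     # multi-label: softmax over [contradiction, entailment] for each label independently
#     pair = reshaped_outputs[..., [contradiction_id, entailment_id]]
#     scores = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)  # last dim sums to 1
#     # single-label: softmax of the entailment logit across all candidate labels
#     entail = reshaped_outputs[..., entailment_id]
#     scores = np.exp(entail) / np.exp(entail).sum(-1, keepdims=True)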
| 45 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def _lowerCAmelCase ( self : List[Any] ,m : nn.Module ,inputs : Tensor ,outputs : Tensor ):
        SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Convad ) or isinstance(m ,nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Optional[Any] ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
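# Sketch (illustrative, not from this script) of the hook pattern Tracker builds on:
# `register_forward_hook` fires after every submodule forward, so one traced pass
# records the leaf modules in execution order before the hooks are detached again.
#
#     handles = [m.register_forward_hook(hook_fn) for m in module.modules()]
#     module(x)                       # a forward pass triggers the hooks
#     [h.remove() for h in handles]   # always detach the hooks afterwards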
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 1
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = True
def __call__( self : str ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) )
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) )
if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case )} operations while'
f' destination module has {len(snake_case )}.' )
for dest_m, src_m in zip(snake_case ,snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,snake_case : nn.Module ):
super().__init__()
SCREAMING_SNAKE_CASE =[]
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f'Unexpected layer name {k}'
SCREAMING_SNAKE_CASE =len(snake_case ) + 1
feature_blocks.append((f'res{block_index}', v) )
SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case )
def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ):
return get_trunk_forward_outputs(
snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ):
SCREAMING_SNAKE_CASE =x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[Any] ,snake_case : str ):
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case )
SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) )
else:
SCREAMING_SNAKE_CASE =super().__getitem__(snake_case )
return val
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __getitem__( self : int ,snake_case : str ):
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE =RegNetModel
else:
SCREAMING_SNAKE_CASE =RegNetForImageClassification
return val
def snake_case__ ( from_state_dict, to_state_dict, keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] =from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}' )
    return to_state_dict
def snake_case__ ( name, from_model_func, our_model_func, lowerCAmelCase_, save_directory, push_to_hub = True, ):
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func()
SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase_ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ )
our_model.load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =(
our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1]
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, )
print(F'Pushed {name}' )
def snake_case__ ( lowerCAmelCase_, model_name = None, push_to_hub = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE =1000
SCREAMING_SNAKE_CASE =(1, num_labels)
SCREAMING_SNAKE_CASE ='huggingface/label-files'
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =json.load(open(cached_download(hf_hub_url(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ) ), 'r' ) )
SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =idalabel
SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE ={
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
SCREAMING_SNAKE_CASE =NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE =NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(lowerCAmelCase_, model_func ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' )
SCREAMING_SNAKE_CASE =model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE =model_state_dict['trunk']
model.load_state_dict(lowerCAmelCase_ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
# IN1K finetuned
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> Any:
lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = MobileNetVaImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def _snake_case ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowercase , """crop_size""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self ) -> Tuple:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 46 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
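# A minimal sketch (illustrative, not part of the original script) of the decorator
# showcased below:
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # on a CUDA out-of-memory error, accelerate frees memory and
#              # retries with batch_size 64, then 32, and so on
#
# The decorated function must accept the batch size as its first argument; the
# decorator injects it and halves it after every out-of-memory failure.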
_lowerCamelCase =16
_lowerCamelCase =32
def snake_case__ ( accelerator, lowerCAmelCase_ = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE =load_dataset('glue', 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE =datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE =8
else:
SCREAMING_SNAKE_CASE =None
return tokenizer.pad(
lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase =mocked_dataloaders # noqa: F811
def snake_case__ ( config, args ):
"""simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None ) == "1":
SCREAMING_SNAKE_CASE =2
# Initialize accelerator
SCREAMING_SNAKE_CASE =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE =config['lr']
SCREAMING_SNAKE_CASE =int(config['num_epochs'] )
SCREAMING_SNAKE_CASE =int(config['seed'] )
SCREAMING_SNAKE_CASE =int(config['batch_size'] )
SCREAMING_SNAKE_CASE =evaluate.load('glue', 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
def inner_training_loop(lowerCAmelCase_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE =AdamW(params=model.parameters(), lr=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.prepare(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =outputs.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase_, references=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:', lowerCAmelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
main()
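# Typical launch (illustrative): `accelerate launch <this script> --mixed_precision fp16`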
| 334 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : List[Any] = logging.getLogger()
def _lowerCAmelCase ( _UpperCamelCase : Path , _lowerCamelCase : list ) -> Any:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE ='\n'.join(_lowerCamelCase )
    Path(_UpperCamelCase ).open('w' ).writelines(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = "patrickvonplaten/t5-tiny-random"
lowerCamelCase : Tuple = "sshleifer/bart-tiny-random"
lowerCamelCase : List[Any] = "sshleifer/tiny-mbart"
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class A__ ( A__ ):
def A ( self : Any , _a : Dict ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_SCREAMING_SNAKE_CASE =input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_SCREAMING_SNAKE_CASE =[' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(_a , _a )
_SCREAMING_SNAKE_CASE =str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_SCREAMING_SNAKE_CASE ='translation_en_to_de' if model == T5_TINY else 'summarization'
_SCREAMING_SNAKE_CASE =f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(_a , 'argv' , _a ):
run_generate()
assert Path(_a ).exists()
# os.remove(Path(output_file_name))
def A ( self : List[str] ) -> str:
'''simple docstring'''
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def A ( self : Optional[Any] , _a : Tuple ) -> List[str]:
'''simple docstring'''
self.run_eval_tester(_a )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def A ( self : Dict , _a : Dict ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_SCREAMING_SNAKE_CASE =input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_SCREAMING_SNAKE_CASE ={
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() )
_SCREAMING_SNAKE_CASE =str(tmp_dir / 'scores.json' )
_SCREAMING_SNAKE_CASE =str(tmp_dir / 'val.target' )
_dump_articles(_a , text['en'] )
_dump_articles(_a , text['de'] )
_SCREAMING_SNAKE_CASE ='translation_en_to_de' if model == T5_TINY else 'summarization'
_SCREAMING_SNAKE_CASE =f"\n run_eval_search.py\n {model}\n {str(_a )}\n {str(_a )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(_a , 'argv' , _a ):
with CaptureStdout() as cs:
run_search()
_SCREAMING_SNAKE_CASE =[' num_beams | length_penalty', model, 'Best score args']
_SCREAMING_SNAKE_CASE =['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(_a )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_a ).exists()
os.remove(Path(_a ) )
| 47 |
def snake_case__ ( lowerCAmelCase_ ):
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in lowerCAmelCase_.split() )
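# Example: only words longer than four characters are reversed, so
# snake_case__('Hey wollef sroirraw') returns 'Hey fellow warriors'.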
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(snake_case__("Hey wollef sroirraw"))
| 334 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = """openai-gpt"""
lowerCamelCase_ : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=4_0478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **UpperCamelCase__ , ) -> Optional[int]:
lowerCamelCase : str = vocab_size
lowerCamelCase : str = n_positions
lowerCamelCase : List[Any] = n_embd
lowerCamelCase : int = n_layer
lowerCamelCase : int = n_head
lowerCamelCase : Tuple = afn
lowerCamelCase : List[str] = resid_pdrop
lowerCamelCase : Any = embd_pdrop
lowerCamelCase : Union[str, Any] = attn_pdrop
lowerCamelCase : Union[str, Any] = layer_norm_epsilon
lowerCamelCase : Any = initializer_range
lowerCamelCase : Any = summary_type
lowerCamelCase : int = summary_use_proj
lowerCamelCase : Union[str, Any] = summary_activation
lowerCamelCase : Union[str, Any] = summary_first_dropout
lowerCamelCase : Union[str, Any] = summary_proj_to_labels
super().__init__(**UpperCamelCase__ )
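# Illustrative usage (not in the source file): `attribute_map` aliases the canonical
# config names onto the GPT-specific ones, so for a config instance `cfg`:
#
#     cfg.hidden_size == cfg.n_embd
#     cfg.num_hidden_layers == cfg.n_layer
#     cfg.num_attention_heads == cfg.n_head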
| 48 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,)
SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
    def __init__( self : str , parent : List[str] , vocab_size : str=100 , batch_size : int=13 , image_size : Dict=30 , patch_size : List[Any]=2 , num_channels : List[str]=3 , is_training : List[str]=True , use_labels : int=True , hidden_size : Any=32 , num_hidden_layers : Dict=5 , num_attention_heads : List[Any]=4 , intermediate_size : List[str]=37 , hidden_act : int="gelu" , hidden_dropout_prob : int=0.1 , attention_probs_dropout_prob : Optional[int]=0.1 , type_sequence_label_size : List[str]=10 , initializer_range : Optional[Any]=0.02 , num_labels : List[str]=3 , ):
'''simple docstring'''
__a = parent
__a = vocab_size
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
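        # e.g. image_size=30, patch_size=2 -> (30 // 2) ** 2 = 225 patches, 226 tokens with [CLS]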
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, pixel_values, labels
    def _lowerCamelCase ( self : Tuple , config : str , pixel_values : List[Any] , labels : Optional[Any]):
        '''simple docstring'''
        __a = FlaxBeitModel(config=config)
        __a = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def _lowerCamelCase ( self : Tuple , config : str , pixel_values : Optional[int] , labels : Tuple):
        '''simple docstring'''
        __a = FlaxBeitForMaskedImageModeling(config=config)
        __a = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size))
    def _lowerCamelCase ( self : List[str] , config : str , pixel_values : Dict , labels : Optional[int]):
        '''simple docstring'''
        __a = self.type_sequence_label_size
        __a = FlaxBeitForImageClassification(config=config)
        __a = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        __a = 1
        __a = FlaxBeitForImageClassification(config)
        __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __a = model(pixel_values)
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config, pixel_values, labels = self.prepare_config_and_inputs()
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase):
all_model_classes = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def setUp( self ):
'''simple docstring'''
self.model_tester = FlaxBeitModelTester(self)
self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37)
def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_forward_signature( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names)
def test_jit_compilation( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
model = model_class(config)
@jax.jit
def model_jitted(pixel_values , **kwargs):
return model(pixel_values=pixel_values , **kwargs)
with self.subTest('''JIT Enabled'''):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(jitted_outputs) , len(outputs))
for jitted_output, output in zip(jitted_outputs , outputs):
self.assertEqual(jitted_output.shape , output.shape)
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_image_classification( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''')
outputs = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(outputs)
def prepare_img():
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest( unittest.TestCase ):
@cached_property
def default_image_processor( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''') if is_vision_available() else None
@slow
def test_inference_masked_image_modeling_head( self ):
'''simple docstring'''
model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''')
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors='''np''').pixel_values
# prepare bool_masked_pos
bool_masked_pos = np.ones((1, 196) , dtype=bool)
# forward pass
outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos)
logits = outputs.logits
# verify the logits
expected_shape = (1, 196, 8_192)
self.assertEqual(logits.shape , expected_shape)
expected_slice = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]])
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2))
@slow
def test_inference_image_classification_head_imagenet_1k( self ):
'''simple docstring'''
model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''')
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''np''')
# forward pass
outputs = model(**inputs)
logits = outputs.logits
# verify the logits
expected_shape = (1, 1_000)
self.assertEqual(logits.shape , expected_shape)
expected_slice = np.array([-1.23_85, -1.09_87, -1.01_08])
self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4))
expected_class_idx = 281
self.assertEqual(logits.argmax(-1).item() , expected_class_idx)
@slow
def test_inference_image_classification_head_imagenet_22k( self ):
'''simple docstring'''
model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''')
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''np''')
# forward pass
outputs = model(**inputs)
logits = outputs.logits
# verify the logits
expected_shape = (1, 21_841)
self.assertEqual(logits.shape , expected_shape)
expected_slice = np.array([1.68_81, -0.27_87, 0.59_01])
self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4))
expected_class_idx = 2_396
self.assertEqual(logits.argmax(-1).item() , expected_class_idx)
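# Hedged sketch (added for illustration; not part of the original test suite): the
# same jax.jit pattern exercised by test_jit_compilation above, applied to a plain
# standalone function. Uses only public jax APIs.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def scaled_sum(x):
        # traced and compiled on the first call, then reused on subsequent calls
        return (2.0 * x).sum()

    print(scaled_sum(jnp.ones((1, 3, 224, 224))))  # 301056.0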
| 49 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = 'blip_2_vision_model'
def __init__( self ,hidden_size=1408 ,intermediate_size=6144 ,num_hidden_layers=39 ,num_attention_heads=16 ,image_size=224 ,patch_size=14 ,hidden_act='gelu' ,layer_norm_eps=0.00_001 ,attention_dropout=0.0 ,initializer_range=1e-10 ,qkv_bias=True ,**kwargs ,):
super().__init__(**kwargs )
self.hidden_size =hidden_size
self.intermediate_size =intermediate_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.patch_size =patch_size
self.image_size =image_size
self.initializer_range =initializer_range
self.attention_dropout =attention_dropout
self.layer_norm_eps =layer_norm_eps
self.hidden_act =hidden_act
self.qkv_bias =qkv_bias
@classmethod
def from_pretrained ( cls ,pretrained_model_name_or_path: Union[str, os.PathLike] ,**kwargs ):
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs =cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
config_dict =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(config_dict ,**kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = 'blip_2_qformer'
def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act='gelu' ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type='absolute' ,cross_attention_frequency=2 ,encoder_hidden_size=1408 ,**kwargs ,):
super().__init__(pad_token_id=pad_token_id ,**kwargs )
self.vocab_size =vocab_size
self.hidden_size =hidden_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.hidden_act =hidden_act
self.intermediate_size =intermediate_size
self.hidden_dropout_prob =hidden_dropout_prob
self.attention_probs_dropout_prob =attention_probs_dropout_prob
self.max_position_embeddings =max_position_embeddings
self.initializer_range =initializer_range
self.layer_norm_eps =layer_norm_eps
self.position_embedding_type =position_embedding_type
self.cross_attention_frequency =cross_attention_frequency
self.encoder_hidden_size =encoder_hidden_size
@classmethod
def from_pretrained ( cls ,pretrained_model_name_or_path: Union[str, os.PathLike] ,**kwargs ):
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs =cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
config_dict =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(config_dict ,**kwargs )
class BlipaConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = 'blip-2'
is_composition = True
def __init__( self ,vision_config=None ,qformer_config=None ,text_config=None ,num_query_tokens=32 ,**kwargs ):
super().__init__(**kwargs )
if vision_config is None:
vision_config ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
qformer_config ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
text_config ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
self.vision_config =BlipaVisionConfig(**vision_config )
self.qformer_config =BlipaQFormerConfig(**qformer_config )
text_model_type =text_config['model_type'] if 'model_type' in text_config else 'opt'
self.text_config =CONFIG_MAPPING[text_model_type](**text_config )
self.tie_word_embeddings =self.text_config.tie_word_embeddings
self.is_encoder_decoder =self.text_config.is_encoder_decoder
self.num_query_tokens =num_query_tokens
self.qformer_config.encoder_hidden_size =self.vision_config.hidden_size
self.use_decoder_only_language_model =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor =1.0
self.initializer_range =0.02
@classmethod
def from_vision_qformer_text_configs ( cls ,vision_config: BlipaVisionConfig ,qformer_config: BlipaQFormerConfig ,text_config: PretrainedConfig ,**kwargs ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
def to_dict ( self ):
output =copy.deepcopy(self.__dict__ )
output['vision_config'] =self.vision_config.to_dict()
output['qformer_config'] =self.qformer_config.to_dict()
output['text_config'] =self.text_config.to_dict()
output['model_type'] =self.__class__.model_type
return output
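# Hedged usage sketch (added for illustration; runs only as a script): composing a
# BLIP-2 config from its three sub-configs via the classmethod defined above.
# OPTConfig as the text backbone follows the 'opt' default noted in __init__.
if __name__ == "__main__":
    from transformers import OPTConfig

    vision = BlipaVisionConfig(image_size=224, patch_size=14)
    qformer = BlipaQFormerConfig(cross_attention_frequency=2)
    text = OPTConfig()
    config = BlipaConfig.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=32)
    print(config.to_dict()["model_type"])  # blip-2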
| 334 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger()
def convert_weight_and_push ( hidden_sizes , name , config , save_directory , push_to_hub = True ) -> int:
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
from_model = timm.create_model('levit_128s' , pretrained=True )
else:
from_model = timm.create_model('levit_128' , pretrained=True )
if hidden_sizes == 192:
from_model = timm.create_model('levit_192' , pretrained=True )
if hidden_sizes == 256:
from_model = timm.create_model('levit_256' , pretrained=True )
if hidden_sizes == 384:
from_model = timm.create_model('levit_384' , pretrained=True )
from_model.eval()
our_model = LevitForImageClassificationWithTeacher(config ).eval()
huggingface_weights = OrderedDict()
weights = from_model.state_dict()
og_keys = list(from_model.state_dict().keys() )
new_keys = list(our_model.state_dict().keys() )
print(len(og_keys ) , len(new_keys ) )
for i in range(len(og_keys ) ):
huggingface_weights[new_keys[i]] = weights[og_keys[i]]
our_model.load_state_dict(huggingface_weights )
x = torch.randn((2, 3, 224, 224) )
from_logits = from_model(x )
our_logits = our_model(x ).logits
assert torch.allclose(from_logits , our_logits ), "The model logits don't match the original one."
checkpoint_name = name
print(checkpoint_name )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
image_processor = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
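# Hedged illustration (helper name is hypothetical, not from the script): the loop
# above maps weights purely by key *position*, pairing the i-th timm key with the
# i-th HuggingFace key. The same idea as a standalone function:
def _remap_by_position(src_state_dict, dst_keys):
    # assumes both models enumerate their parameters in the same order
    return OrderedDict(zip(dst_keys, src_state_dict.values()))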
def convert_weights_and_push ( save_directory , model_name = None , push_to_hub = True ) -> List[str]:
filename = 'imagenet-1k-id2label.json'
num_labels = 1000
expected_shape = (1, num_labels)
repo_id = 'huggingface/label-files'
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase : Optional[Any] = parser.parse_args()
_UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 50 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def _compute ( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets='auto' ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name='gpt2-large' ,device_id=-1 ,max_text_length=1024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
out =compute_mauve(
p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
return out
| 334 | 0 |
def hex_to_bin (hex_num: str ) -> int:
"""simple docstring"""
hex_num = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
is_negative = hex_num[0] == '''-'''
if is_negative:
hex_num = hex_num[1:]
try:
int_num = int(hex_num , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
bin_str = ''''''
while int_num > 0:
bin_str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
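# Worked examples (added commentary; values follow directly from the algorithm above):
#     hex_to_bin("AC")    -> 10101100   (0xAC == 172 == 0b10101100)
#     hex_to_bin("-0x10") -> -10000     (the sign is stripped, converted, then re-applied)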
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = 'vit_mae'
def __init__( self ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act='gelu' ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,image_size=224 ,patch_size=16 ,num_channels=3 ,qkv_bias=True ,decoder_num_attention_heads=16 ,decoder_hidden_size=512 ,decoder_num_hidden_layers=8 ,decoder_intermediate_size=2048 ,mask_ratio=0.75 ,norm_pix_loss=False ,**kwargs ,):
super().__init__(**kwargs )
self.hidden_size =hidden_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.intermediate_size =intermediate_size
self.hidden_act =hidden_act
self.hidden_dropout_prob =hidden_dropout_prob
self.attention_probs_dropout_prob =attention_probs_dropout_prob
self.initializer_range =initializer_range
self.layer_norm_eps =layer_norm_eps
self.image_size =image_size
self.patch_size =patch_size
self.num_channels =num_channels
self.qkv_bias =qkv_bias
self.decoder_num_attention_heads =decoder_num_attention_heads
self.decoder_hidden_size =decoder_hidden_size
self.decoder_num_hidden_layers =decoder_num_hidden_layers
self.decoder_intermediate_size =decoder_intermediate_size
self.mask_ratio =mask_ratio
self.norm_pix_loss =norm_pix_loss
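# Hedged usage sketch (added for illustration; relies only on the fields set above):
# the encoder sees (image_size // patch_size) ** 2 patch tokens, so the default
# 224/16 geometry yields 196 patches, of which mask_ratio = 0.75 are masked out.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, int(config.mask_ratio * num_patches))  # 196 147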
| 334 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum (x ): # picklable for multiprocessing
return x.sum()
def add_one (i ): # picklable for multiprocessing
return i + 1
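# Added commentary (illustration only, not one of the module's assertions):
# map_nested applies a function to every leaf of a nested dict/list structure, e.g.
#     map_nested(add_one, {"a": [1, 2], "b": 3})  ->  {"a": [2, 3], "b": 4}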
@dataclass
class A :
x: int
y: str
class PyUtilsTest ( TestCase ):
def test_map_nested( self ):
'''simple docstring'''
UpperCamelCase : Dict = {}
UpperCamelCase : Any = []
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[int] = [1, 2]
UpperCamelCase : Union[str, Any] = {"a": 1, "b": 2}
UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
UpperCamelCase : Optional[Any] = {"a": {"1": 1}, "b": 2}
UpperCamelCase : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCamelCase : Dict = {}
UpperCamelCase : List[str] = []
UpperCamelCase : Union[str, Any] = 2
UpperCamelCase : str = [2, 3]
UpperCamelCase : str = {"a": 2, "b": 3}
UpperCamelCase : Optional[Any] = {"a": [2, 3], "b": [4, 5]}
UpperCamelCase : List[str] = {"a": {"1": 2}, "b": 3}
UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
UpperCamelCase : Any = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
UpperCamelCase : Optional[int] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCamelCase : Optional[Any] = {"a": 2, "b": 0, "c": 2}
UpperCamelCase : Optional[Any] = {
"a": np.eye(2 ).astype(A_ ),
"b": np.zeros(3 ).astype(A_ ),
"c": np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(ValueError ): # can't pickle a local lambda
map_nested(lambda x : x + 1 , {"a": 1} , num_proc=2 )
def test_zip_dict( self ):
'''simple docstring'''
da = {"a": 1, "b": 2}
db = {"a": 3, "b": 4}
dc = {"a": 5, "b": 6}
expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(da , db , dc ) ) , expected_zip_dict_result )
def test_temporary_assignment( self ):
'''simple docstring'''
class Foo :
my_attr = 'bar'
foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(foo , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc (iterable_length , num_proc , expected_num_proc ) -> List[Any]:
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
data_struct = {F"""{i}""": i for i in range(iterable_length )}
_ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
@require_tf
def test_tensorflow( self ):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase : Dict = layers.Dense(2 )
def gen_random_output():
UpperCamelCase : Optional[int] = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=True ):
out_a = gen_random_output()
with temp_seed(42 , set_tensorflow=True ):
out_b = gen_random_output()
out_c = gen_random_output()
np.testing.assert_equal(out_a , out_b )
self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
@require_torch
def test_torch( self ):
'''simple docstring'''
import torch
def gen_random_output():
UpperCamelCase : Any = torch.nn.Linear(3 , 2 )
UpperCamelCase : Union[str, Any] = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=True ):
out_a = gen_random_output()
with temp_seed(42 , set_pytorch=True ):
out_b = gen_random_output()
out_c = gen_random_output()
np.testing.assert_equal(out_a , out_b )
self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
def test_numpy( self ):
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
out_a = gen_random_output()
with temp_seed(42 ):
out_b = gen_random_output()
out_c = gen_random_output()
np.testing.assert_equal(out_a , out_b )
self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data (input_data ) -> Dict:
output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten (data , expected_output ) -> Tuple:
output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict () -> List[Any]:
input = A(x=1 , y="foobar" )
expected_output = {"x": 1, "y": "foobar"}
assert asdict(input ) == expected_output
input = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(input ) == expected_output
with pytest.raises(TypeError ):
asdict([1, A(x=10 , y="foo" )] )
def _split_text (text ) -> Any:
return text.split()
def _aseconds_generator_of_aitems_with_timing (content ) -> Optional[int]:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered () -> int:
with Pool(2 ) as pool:
out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
out = []
for yield_time, content in iflatmap_unordered(
pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(content )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(out ) == 4
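# Design note (added commentary): iflatmap_unordered yields results as soon as any
# worker produces them, which is why the timing assertions above can require each
# item to appear immediately after it is yielded rather than after the whole batch.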
| 52 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
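# Added commentary (generic pattern, not specific to this module): _LazyModule
# defers the heavy submodule imports until an attribute is first touched, so
#     import transformers.models.ernie as ernie   # cheap, nothing loaded yet
#     ernie.ErnieModel                             # triggers the real import
# while the TYPE_CHECKING branch above keeps static type checkers fully informed.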
| 334 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_small_integration_test ( self ):
model = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
input_ids = tokenizer('Hello there' , return_tensors='tf' ).input_ids
labels = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
loss = model(input_ids , labels=labels ).loss
mtf_score = -tf.math.reduce_mean(loss ).numpy()
EXPECTED_SCORE = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
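# Note on the expected value (added commentary, not from the original file):
# model(...).loss is the per-token negative log-likelihood of the labels, so
# -tf.math.reduce_mean(loss) is the mean log-likelihood of the target tokens,
# which is why EXPECTED_SCORE is a negative number.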
| 53 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest ( TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output ( output, expected_num_chunks ):
"""simple docstring"""
stream =pa.BufferReader(output ) if isinstance(output, pa.Buffer ) else pa.memory_map(output )
f =pa.ipc.open_stream(stream )
pa_table =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
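# Hedged usage sketch (illustration, separate from the tests; the helper name is
# hypothetical): the minimal ArrowWriter round trip that _check_output verifies.
def _example_round_trip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        writer.finalize()
    # read the stream back and materialize it as plain Python objects
    return pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all().to_pydict()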
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def test_write ( writer_batch_size, fields ):
"""simple docstring"""
output =pa.BufferOutputStream()
schema =pa.schema(fields ) if fields else None
with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
num_examples , num_bytes =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
fields ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lst[0], lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a__ : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments :
"""simple docstring"""
dataset_name : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"})
dataset_config_name : Optional[str] = field(
default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
image_column_name : Optional[str] = field(
default=None , metadata={"help": "The column name of the images in the files."})
train_dir : Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."})
validation_dir : Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."})
train_val_split : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."})
max_train_samples : Optional[int] = field(
default=None , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
max_eval_samples : Optional[int] = field(
default=None , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __post_init__( self ) -> List[Any]:
data_files = {}
if self.train_dir is not None:
data_files["train"] = self.train_dir
if self.validation_dir is not None:
data_files["val"] = self.validation_dir
self.data_files = data_files if data_files else None
@dataclass
class ModelArguments :
"""simple docstring"""
model_name_or_path : str = field(
default=None , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
config_name : Optional[str] = field(
default=None , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
config_overrides : Optional[str] = field(
default=None , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
cache_dir : Optional[str] = field(
default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
model_revision : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
image_processor_name : str = field(default=None , metadata={"help": "Name or path of preprocessor config."})
use_auth_token : bool = field(
default=False , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
mask_ratio : float = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
norm_pix_loss : bool = field(
default=True , metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments ( TrainingArguments ):
"""simple docstring"""
base_learning_rate : float = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})
def collate_fn (examples ):
'''simple docstring'''
pixel_values = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
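# Hedged sketch (added commentary; `train_dataset` is a hypothetical stand-in):
# how this collator is consumed outside the Trainer, e.g. with a plain DataLoader.
#     from torch.utils.data import DataLoader
#     loader = DataLoader(train_dataset, batch_size=8, collate_fn=collate_fn)
#     batch = next(iter(loader))  # {"pixel_values": tensor of shape (8, 3, H, W)}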
def main ():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE = ds["train"].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE = split["train"]
__SCREAMING_SNAKE_CASE = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(lowerCAmelCase_ )
if training_args.do_train:
__SCREAMING_SNAKE_CASE = ds["train"].column_names
else:
__SCREAMING_SNAKE_CASE = ds["validation"].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE = "image"
elif "img" in column_names:
__SCREAMING_SNAKE_CASE = "img"
else:
__SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size["shortest_edge"]
else:
__SCREAMING_SNAKE_CASE = (image_processor.size["height"], image_processor.size["width"])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda lowerCAmelCase_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowerCAmelCase_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [transforms(lowerCAmelCase_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCAmelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCAmelCase_ )
# Compute absolute learning rate
__SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("eval" , lowerCAmelCase_ )
trainer.save_metrics("eval" , lowerCAmelCase_ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs); the index argument is required by the spawner but unused.
    main()
if __name__ == "__main__":
main()
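# A minimal invocation sketch (hypothetical paths; the flags map onto the dataclass
# fields defined above plus the standard HF TrainingArguments):
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss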
| 54 |
def or_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate():
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
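# A minimal extension sketch (not part of the original module): the same
# sequence-`.count(1)` idiom generalizes to an n-input OR gate.
def or_gate_n(*inputs: int) -> int:
    """Return 1 if at least one input is 1, otherwise 0."""
    return int(inputs.count(1) != 0)


assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 1, 0) == 1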
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
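# Example invocation sketch (hypothetical file name for this conversion script):
#   python convert_suno_to_hf.py text ./bark-text-small --is_small
# converts the small semantic ("text") checkpoint and writes the HF model to
# ./bark-text-small.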
| 55 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    """simple docstring"""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
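# A usage sketch (assumes a one-token-per-line vocabulary file at ./vocab.txt):
#   tokenizer = EsmTokenizer("./vocab.txt")
#   ids = tokenizer("MKTAYIAK")["input_ids"]  # <cls> ... <eos> wrapped protein tokens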
| 334 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
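# Call-flow sketch (hypothetical checkpoint name): pipeline("feature-extraction", ...)
# instantiates this class; `preprocess` tokenizes, `_forward` runs the model, and
# `postprocess` returns the first output tensor (the last hidden state) as lists.
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested lists, [1, seq_len, hidden_size]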
| 56 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
"""simple docstring"""
module: nn.Module
traced: List[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
    has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
    if has_not_submodules:
        self.traced.append(m)

def __call__(self, x: Tensor):
    for m in self.module.modules():
        self.handles.append(m.register_forward_hook(self._forward_hook))
    self.module(x)
    [x.remove() for x in self.handles]
    return self

@property
def parametrized(self):
    # check the len of the state_dict keys to see if we have learnable params
    return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
"""simple docstring"""
src: nn.Module
dest: nn.Module
verbose: int = 0
src_skip: List = field(default_factory=list)
dest_skip: List = field(default_factory=list)
def __call__(self, x: Tensor):
    dest_traced = Tracker(self.dest)(x).parametrized
    src_traced = Tracker(self.src)(x).parametrized
    src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
    dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
    if len(dest_traced) != len(src_traced):
        raise Exception(
            f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
            f' destination module has {len(dest_traced)}.')
    for dest_m, src_m in zip(dest_traced, src_traced):
        dest_m.load_state_dict(src_m.state_dict())
        if self.verbose == 1:
            print(f'Transferred from={src_m} to={dest_m}')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """simple docstring"""
    print(F'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
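# Example invocation sketch (hypothetical script name): convert a single checkpoint.
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted
# Note: because --push_to_hub is declared with type=bool, any non-empty string
# (including "False") parses as True; pushing is effectively controlled by the default.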
| 334 | 0 |
"""simple docstring"""
import re
def split_input(str_):
    '''simple docstring'''
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    '''simple docstring'''
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text, upper, separator):
    '''simple docstring'''
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    '''simple docstring'''
    return to_simple_case(text)


def to_camel_case(text):
    '''simple docstring'''
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    '''simple docstring'''
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    '''simple docstring'''
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
| 57 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
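# Usage sketch (hypothetical artifact names; needs a GitHub token with read access):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="./ci_artifacts",
#       token=os.environ["GITHUB_TOKEN"])
#   # reports maps artifact name -> {member file name -> decoded text content}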
| 334 | 0 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
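# Worked check: arc_length(90, 10) == 2 * pi * 10 * (90 / 360) == 5 * pi (about 15.70796).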
| 58 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
"""simple docstring"""
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=False,
    use_labels=True,
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=64,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    scope=None,
    q_groups=2,
    k_groups=2,
    v_groups=2,
    post_attention_groups=2,
    intermediate_groups=4,
    output_groups=1,
):
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
    self.q_groups = q_groups
    self.k_groups = k_groups
    self.v_groups = v_groups
    self.post_attention_groups = post_attention_groups
    self.intermediate_groups = intermediate_groups
    self.output_groups = output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
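# Usage sketch (hypothetical path, following the standard transformers test layout):
#   python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py -k squeezebert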
| 334 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prims_algorithm():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_1, node_2, cost in edges:
        adjacency[node_1].append([node_2, cost])
        adjacency[node_2].append([node_1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
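# Sanity check on the expected answer: the tree spans all 9 nodes with 8 edges and
# total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 == 37.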
| 59 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})

# truncate to smallest
input_1 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
)
input_1 = input_1[input_name]

input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
input_2 = input_2[input_name]

self.assertTrue(_inputs_have_equal_length(input_1))
self.assertFalse(_inputs_have_equal_length(input_2))

# truncate to smallest with np
input_1 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True
)
input_1 = input_1[input_name]

input_2 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
)
input_2 = input_2[input_name]

self.assertTrue(_inputs_have_equal_length(input_1))
self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))

# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(input_2))

# truncate to middle
input_1 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np"
)
input_1 = input_1[input_name]

input_2 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
)
input_2 = input_2[input_name]

input_3 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
)
input_3 = input_3[input_name]

self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(input_1))
self.assertTrue(_inputs_have_equal_length(input_2))
self.assertTrue(_inputs_are_equal(input_1, input_2))

# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(input_3))
self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
    feat_extract.pad(processed_features, truncation=True)[input_name]

# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
    feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
    feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(ValueError):
    feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

# test truncation for `pad_to_multiple_of` for List[int] + numpy
pad_to_multiple_of = 12
input_1 = feat_extract.pad(
    processed_features,
    padding="max_length",
    max_length=len(speech_inputs[0]),
    pad_to_multiple_of=pad_to_multiple_of,
    truncation=True,
)
input_1 = input_1[input_name]

input_2 = feat_extract.pad(
    processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of
)
input_2 = input_2[input_name]

# retrieve expected_length as multiple of pad_to_multiple_of
expected_length = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
    expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

self.assertTrue(len(input_1[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(input_1))
self.assertFalse(_inputs_have_equal_length(input_2))
def test_padding_from_list(self):
    self._check_padding(numpify=False)

def test_padding_from_array(self):
    self._check_padding(numpify=True)

def test_truncation_from_list(self):
    self._check_truncation(numpify=False)

def test_truncation_from_array(self):
    self._check_truncation(numpify=True)
@require_torch
def test_padding_accepts_tensors_pt(self):
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_name = feat_extract.model_input_names[0]
    processed_features = BatchFeature({input_name: speech_inputs})

    input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
    input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

    self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

@require_tf
def test_padding_accepts_tensors_tf(self):
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_name = feat_extract.model_input_names[0]
    processed_features = BatchFeature({input_name: speech_inputs})

    input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
    input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

    self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
def test_attention_mask(self):
    feat_dict = self.feat_extract_dict
    feat_dict["return_attention_mask"] = True
    feat_extract = self.feature_extraction_class(**feat_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_lengths = [len(x) for x in speech_inputs]
    input_name = feat_extract.model_input_names[0]
    processed = BatchFeature({input_name: speech_inputs})

    processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
    self.assertIn("attention_mask", processed)
    self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
    self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
def test_attention_mask_with_truncation(self):
    feat_dict = self.feat_extract_dict
    feat_dict["return_attention_mask"] = True
    feat_extract = self.feature_extraction_class(**feat_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_lengths = [len(x) for x in speech_inputs]
    input_name = feat_extract.model_input_names[0]
    processed = BatchFeature({input_name: speech_inputs})
    max_length = min(input_lengths)

    processed_pad = feat_extract.pad(
        processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
    )
    self.assertIn("attention_mask", processed_pad)
    self.assertListEqual(
        list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
    )
    self.assertListEqual(
        processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
    )
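
# A minimal, self-contained sketch (ours, not part of the test mixin above) of the
# round-up rule the `pad_to_multiple_of` assertions rely on: a sequence of length L
# is padded to the next multiple of `pad_to_multiple_of`. The helper name
# `round_up_to_multiple` is chosen here purely for illustration.
def round_up_to_multiple(length: int, multiple: int) -> int:
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple

assert round_up_to_multiple(800, 12) == 804  # 800 = 66*12 + 8, rounded up to 67*12
assert round_up_to_multiple(96, 12) == 96    # already a multiple, unchanged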
| 334 | 0 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
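
# Hedged sketch (ours, not diffusers code) of the optional-dependency guard used
# above, with generic names `some_backend_available` and `_RealImpl` that are
# assumptions for illustration: probe for the backend once, and fall back to a
# stub that fails loudly only when the guarded object is actually used.
import importlib.util

def some_backend_available() -> bool:
    return importlib.util.find_spec("numpy") is not None  # stand-in probe

if some_backend_available():
    import numpy as _np

    def _RealImpl(x):
        return _np.asarray(x).sum()
else:
    def _RealImpl(x):  # dummy object mirroring the real API
        raise ImportError("this feature requires the optional backend")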
| 60 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for `target` is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
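
    # A quick sanity check of the building blocks above (our own demonstration,
    # not part of the original script): evaluate scores position-wise matches
    # against the target, and crossover preserves the parents' length.
    assert evaluate("banana", "banana") == ("banana", 6.0)  # perfect match scores len(target)
    demo_child_1, demo_child_2 = crossover("aaaa", "bbbb")
    assert len(demo_child_1) == len(demo_child_2) == 4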
| 334 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
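
    # A small usage check (ours, not part of the script): md_prefix(0) opens a new
    # "##" heading, while md_prefix(i > 0) produces a bullet indented two spaces
    # per nesting level, e.g. "  * [Quick Sort](sorts/quick_sort.py)".
    assert md_prefix(0) == "\n##"
    assert md_prefix(1) == "  *"
    assert md_prefix(2) == "    *"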
| 61 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
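
# A minimal usage sketch (ours, not part of the test suite): added tokens act as
# split points over raw text, which is exactly what the tests above assert.
# Trie is already imported above from transformers.tokenization_utils.
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("extra_id_1")
    print(demo_trie.split("[CLS] Hello extra_id_1"))  # ['[CLS]', ' Hello ', 'extra_id_1']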
| 334 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_A = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
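
# Hedged sketch (ours) of the mocking pattern referenced in the header comment
# ("mock `{script_name}.get_dataloaders`"): patch the module-level symbol so a
# launched example trains on tiny local data. `nlp_example` and
# `mocked_dataloaders` are illustrative names, not guaranteed to match the repo.
#
# from unittest import mock
#
# @mock.patch("nlp_example.get_dataloaders", mocked_dataloaders)
# def test_nlp_example_runs(self):
#     run_command(self._launch_args + ["examples/nlp_example.py"])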
| 62 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
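
# Standalone illustration (ours, not part of the conversion script) of the
# prefix-renaming idea used by get_new_dict: each (old, new) pair is applied as
# a substring replacement to every state-dict key, so "bert.bert.encoder..."
# becomes "visual_bert.encoder...".
def demo_rename(key: str, pairs) -> str:
    for old, new in pairs:
        key = key.replace(old, new)
    return key

assert demo_rename("bert.bert.encoder.layer.0", [("bert.bert", "visual_bert")]) == "visual_bert.encoder.layer.0"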
| 334 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a graph given as an adjacency dict."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
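
    # For contrast, a breadth-first variant (our sketch, not part of the original
    # file): identical bookkeeping, but a FIFO queue replaces the LIFO stack.
    from collections import deque

    def breadth_first_search(graph: dict, start: str) -> set[str]:
        explored, queue = set(start), deque([start])
        while queue:
            v = queue.popleft()  # pop first element instead of last
            explored.add(v)
            for adj in graph[v]:
                if adj not in explored:
                    queue.append(adj)
        return explored

    print(breadth_first_search(G, "A"))  # same reachable set, different visit order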
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32",
        router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4,
        decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
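
# Minimal usage sketch (ours, not from the transformers docs): instantiate with a
# smaller expert count; `attribute_map` above lets `hidden_size` alias `d_model`.
#
# config = NllbMoeConfig(num_experts=8, expert_capacity=16)
# assert config.hidden_size == config.d_model == 1024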
| 334 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
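
# Illustration (ours, inferred only from the asserts above) of what the
# conversion is expected to produce: flag/value pairs become typed entries, and
# "50.5" parses as a float while "3" parses as an int.
# _convert_nargs_to_dict(["--epochs", "3", "--max_steps", "50.5"])
# -> {"epochs": 3, "max_steps": 50.5}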
| 64 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
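
# Note on cost (ours, not part of the original script): sorting the concatenation
# is O((m+n) log(m+n)). If the two inputs happen to be already sorted, a
# two-pointer merge reaches the middle in O(m+n); a sketch:
def median_sorted(a: list[float], b: list[float]) -> float:
    merged: list[float] = []
    i = j = 0
    while i < len(a) or j < len(b):
        if j >= len(b) or (i < len(a) and a[i] <= b[j]):
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    mid = len(merged) // 2
    return merged[mid] if len(merged) % 2 else (merged[mid] + merged[mid - 1]) / 2

assert median_sorted([1, 2], [3]) == 2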
| 334 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 65 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
__UpperCAmelCase = 'transfo-xl'
__UpperCAmelCase = ['mems']
__UpperCAmelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self,
        vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64,
        d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True,
        proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0,
        untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02,
        layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 334 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
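
# Minimal usage sketch (ours, not part of the class): train the Unigram
# tokenizer on an in-memory toy corpus; a tiny vocab_size keeps it fast.
#
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train_from_iterator(["hello world", "hello tokenizer"], vocab_size=30)
# print(tokenizer.encode("hello world").tokens)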
| 66 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
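# A minimal, self-contained sketch (independent of the test class above) of what
# post_process_semantic_segmentation is assumed to do under the hood: upsample
# the raw logits to the requested target size, then take the per-pixel argmax.
import torch
import torch.nn.functional as F
raw_logits = torch.randn(1, 150, 160, 160)  # (batch, num_classes, h, w), shapes as in the test
resized_logits = F.interpolate(raw_logits, size=(500, 300), mode='bilinear', align_corners=False)
segmentation_map = resized_logits.argmax(dim=1)  # (1, 500, 300) map of class ids
assert segmentation_map.shape == (1, 500, 300)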
| 334 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "vocab.json"}
__UpperCAmelCase ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
__UpperCAmelCase ={"mgp-str": 2_7}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , a : str , a : Optional[int]="[GO]" , a : int="[GO]" , a : Union[str, Any]="[s]" , a : Union[str, Any]="[GO]" , **a : Optional[int] ):
"""simple docstring"""
super().__init__(
unk_token=a , bos_token=a , eos_token=a , pad_token=a , **a , )
with open(a , encoding='''utf-8''' ) as vocab_handle:
__lowerCamelCase = json.load(a )
__lowerCamelCase = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return len(self.vocab )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : str ):
"""simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
return char_tokens
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : List[Any] ):
"""simple docstring"""
return self.vocab.get(a , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : int , a : Any ):
"""simple docstring"""
return self.decoder.get(a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(a ) )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=a , ensure_ascii=a ) + '''\n''' )
return (vocab_file,)
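# A toy illustration (made-up vocabulary, not the real checkpoint) of the
# character-level scheme the tokenizer above implements: every character is its
# own token, and out-of-vocabulary characters fall back to the unk id.
toy_vocab = {'[GO]': 0, '[s]': 1, 'a': 2, 'b': 3, 'c': 4}
toy_decoder = {v: k for k, v in toy_vocab.items()}
def toy_encode(text):
    return [toy_vocab.get(ch, toy_vocab['[GO]']) for ch in text]  # '[GO]' doubles as unk here
assert toy_encode('abc') == [2, 3, 4]
assert toy_decoder[toy_encode('x')[0]] == '[GO]'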
| 67 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Dict ,snake_case : Tensor ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case )
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Optional[Any] ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 1
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = True
def __call__( self : str ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) )
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) )
if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case )} operations while'
f' destination module has {len(snake_case )}.' )
for dest_m, src_m in zip(snake_case ,snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,snake_case : nn.Module ):
super().__init__()
SCREAMING_SNAKE_CASE =[]
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f'Unexpected layer name {k}'
SCREAMING_SNAKE_CASE =len(snake_case ) + 1
feature_blocks.append((f'res{block_index}', v) )
SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case )
def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ):
return get_trunk_forward_outputs(
snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ):
SCREAMING_SNAKE_CASE =x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[Any] ,snake_case : str ):
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case )
SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) )
else:
SCREAMING_SNAKE_CASE =super().__getitem__(snake_case )
return val
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __getitem__( self : int ,snake_case : str ):
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE =RegNetModel
else:
SCREAMING_SNAKE_CASE =RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub = True, ):
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func()
SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase_ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ )
our_model.load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =(
our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1]
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, )
print(F'Pushed {name}' )
def convert_weights_and_push(save_directory, model_name = None, push_to_hub = True ):
"""simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory ), map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        trunk_state_dict = model_state_dict['trunk']
        model.load_state_dict(trunk_state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
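# A compact sketch (toy modules, not the real RegNet classes) of the transfer
# trick the Tracker/ModuleTransfer pair above relies on: enumerate the leaf
# modules of two architecturally identical networks in the same order, zip
# them, and copy state dicts across.
import torch
import torch.nn as nn
toy_src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
toy_dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
for src_leaf, dst_leaf in zip(toy_src.modules(), toy_dst.modules()):
    if len(list(src_leaf.children())) == 0:  # leaf modules only, roughly what Tracker records
        dst_leaf.load_state_dict(src_leaf.state_dict())
toy_input = torch.randn(1, 3, 16, 16)
assert torch.allclose(toy_src(toy_input), toy_dst(toy_input))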
| 334 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    '''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
    '''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 1_0 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) -> None:
    '''simple docstring'''
for row in grid:
for cell in row:
            print(cell , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
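# A small companion sketch (not used by the solver above): sanity-check a
# completed grid by requiring every row, column and 3x3 box to be exactly {1..9}.
def is_complete_and_valid(grid: Matrix) -> bool:
    target = set(range(1, 10))
    rows_ok = all(set(row) == target for row in grid)
    cols_ok = all({grid[r][c] for r in range(9)} == target for c in range(9))
    boxes_ok = all(
        {grid[br + r][bc + c] for r in range(3) for c in range(3)} == target
        for br in (0, 3, 6)
        for bc in (0, 3, 6)
    )
    return rows_ok and cols_ok and boxes_ok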
| 68 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase =16
_lowerCamelCase =32
def get_dataloaders(accelerator, batch_size = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue', 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
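# Why the multiples of 8/16 above matter: tensor cores are fastest when the
# padded sequence length divides evenly by 8 (fp16/bf16) or 16 (fp8). A tiny
# sketch of the rounding that pad_to_multiple_of performs:
def round_up_to_multiple(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple
assert round_up_to_multiple(61, 8) == 64
assert round_up_to_multiple(61, 16) == 64
assert round_up_to_multiple(65, 16) == 80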
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase =mocked_dataloaders # noqa: F811
def training_function(config, args ):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue', 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
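# A stripped-down sketch of what find_executable_batch_size is assumed to do:
# call the wrapped training loop with a starting batch size and retry with half
# the batch size whenever an out-of-memory RuntimeError escapes it.
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            current = starting_batch_size
            while current > 0:
                try:
                    return fn(current)
                except RuntimeError as err:  # the real helper matches CUDA OOM errors more precisely
                    if 'out of memory' not in str(err):
                        raise
                    current //= 2
            raise RuntimeError('No executable batch size found.')
        return wrapper
    return decorator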
| 334 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
    @a_.setter
def a_ ( self, lowerCAmelCase__) -> str:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
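# A toy illustration (made-up token ids) of the layout built by
# build_inputs_with_special_tokens above: <s> A </s> for one sequence and
# <s> A </s> </s> B </s> for a pair.
TOY_BOS, TOY_EOS = 0, 2
def toy_with_special_tokens(ids_a, ids_b=None):
    out = [TOY_BOS] + ids_a + [TOY_EOS]
    if ids_b is None:
        return out
    return out + [TOY_EOS] + ids_b + [TOY_EOS]
assert toy_with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert toy_with_special_tokens([7], [9]) == [0, 7, 2, 2, 9, 2]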
| 69 |
def reverse_long_words(sentence: str ) -> str:
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
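    # Quick check of the behaviour: words longer than four characters are
    # reversed in place, shorter ones ("Hey") are kept as-is.
    assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"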
| 334 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A__ : Optional[Any] =logging.getLogger(__name__)
def main():
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowerCAmelCase , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowerCAmelCase , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowerCAmelCase , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowerCAmelCase , default="""data/dump""" , help="""The dump file prefix.""" )
_lowerCAmelCase = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
_lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
_lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
_lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
_lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
_lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
_lowerCAmelCase = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
_lowerCAmelCase = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"{len(lowerCAmelCase )} examples to process." )
_lowerCAmelCase = []
_lowerCAmelCase = 0
_lowerCAmelCase = 1_00_00
_lowerCAmelCase = time.time()
for text in data:
_lowerCAmelCase = f"{bos} {text.strip()} {sep}"
_lowerCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
rslt.append(lowerCAmelCase )
iter += 1
if iter % interval == 0:
_lowerCAmelCase = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
_lowerCAmelCase = time.time()
logger.info("""Finished binarization""" )
logger.info(f"{len(lowerCAmelCase )} examples processed." )
_lowerCAmelCase = f"{args.dump_file}.{args.tokenizer_name}.pickle"
_lowerCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
_lowerCAmelCase = [np.uintaa(lowerCAmelCase ) for d in rslt]
else:
_lowerCAmelCase = [np.intaa(lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(rslt_ , lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
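# A tiny sketch of the dtype decision above: token ids can be stored as 16-bit
# unsigned ints whenever every id fits in [0, 65535], halving the pickle size
# compared to 32-bit ints.
import numpy as np
def smallest_id_dtype(vocab_size):
    return np.uint16 if vocab_size < (1 << 16) else np.int32
assert smallest_id_dtype(30_522) == np.uint16  # e.g. a BERT-sized vocabulary
assert smallest_id_dtype(70_000) == np.int32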
| 70 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,)
SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys ,'argv' ,testargs ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
        testargs = (
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
        with patch.object(sys ,'argv' ,testargs ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
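# A minimal sketch of the placeholder-substitution trick both tests above use
# to turn a training bash script into argv (names here are illustrative):
toy_script = '--max_len $MAX_LEN --bs $BS --data_dir $ENRO_DIR'
toy_replacements = {'$MAX_LEN': 64, '$BS': 16, '$ENRO_DIR': '/tmp/wmt_en_ro'}
for key, value in toy_replacements.items():
    toy_script = toy_script.replace(key, str(value))
toy_argv = toy_script.split()
assert toy_argv == ['--max_len', '64', '--bs', '16', '--data_dir', '/tmp/wmt_en_ro']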
| 334 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.get_tokenizer()
__UpperCamelCase : List[Any] =self.get_rust_tokenizer()
__UpperCamelCase : Optional[Any] =self.get_image_processor()
__UpperCamelCase : Any =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase : Any =AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase : List[Any] =AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : int =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase : Optional[int] =self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
__UpperCamelCase : List[Any] =AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =self.get_image_processor()
__UpperCamelCase : int =self.get_tokenizer()
__UpperCamelCase : List[Any] =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.prepare_image_inputs()
__UpperCamelCase : Union[str, Any] =image_processor(lowerCamelCase__ , return_tensors='np' )
__UpperCamelCase : Union[str, Any] =processor(images=lowerCamelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =self.get_image_processor()
__UpperCamelCase : Dict =self.get_tokenizer()
__UpperCamelCase : List[Any] =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] ='lower newer'
__UpperCamelCase : Union[str, Any] =processor(text=lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer(lowerCamelCase__ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =self.get_image_processor()
__UpperCamelCase : Tuple =self.get_tokenizer()
__UpperCamelCase : Optional[int] =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ='lower newer'
__UpperCamelCase : str =self.prepare_image_inputs()
__UpperCamelCase : Optional[Any] =processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =self.get_image_processor()
__UpperCamelCase : Dict =self.get_tokenizer()
__UpperCamelCase : int =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCamelCase : int =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase : int =processor.batch_decode(lowerCamelCase__ )
__UpperCamelCase : Dict =tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.get_image_processor()
__UpperCamelCase : Tuple =self.get_tokenizer()
__UpperCamelCase : List[str] =AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCamelCase : Tuple ='lower newer'
__UpperCamelCase : List[Any] =self.prepare_image_inputs()
__UpperCamelCase : List[Any] =processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip_2_vision_model'
def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip_2_qformer'
def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,):
super().__init__(pad_token_id=snake_case ,**snake_case )
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =position_embedding_type
SCREAMING_SNAKE_CASE =cross_attention_frequency
SCREAMING_SNAKE_CASE =encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip-2'
__UpperCAmelCase = True
def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ):
super().__init__(**snake_case )
if vision_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case )
SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case )
SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt'
SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case )
SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE =num_query_tokens
SCREAMING_SNAKE_CASE =self.vision_config.hidden_size
SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE =1.0
SCREAMING_SNAKE_CASE =0.02
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,)
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
SCREAMING_SNAKE_CASE =self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE =self.text_config.to_dict()
SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
| 334 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCAmelCase__ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
lowerCAmelCase__ = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
lowerCAmelCase__ = '''|'''.join(sys.argv[1:])
lowerCAmelCase__ = re.compile(RF"""^({joined_dirs}).*?\.py$""")
lowerCAmelCase__ = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 72 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : str ,snake_case : List[str]=None ,snake_case : str=None ,snake_case : int=None ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]="auto" ,snake_case : List[str]=-1 ,snake_case : Union[str, Any]=0.9 ,snake_case : Tuple=5 ,snake_case : Union[str, Any]=500 ,snake_case : Union[str, Any]="gpt2-large" ,snake_case : Union[str, Any]=-1 ,snake_case : Optional[Any]=1024 ,snake_case : Optional[Any]=25 ,snake_case : List[str]=5 ,snake_case : List[str]=True ,snake_case : Optional[Any]=25 ,):
SCREAMING_SNAKE_CASE =compute_mauve(
p_text=snake_case ,q_text=snake_case ,p_features=snake_case ,q_features=snake_case ,p_tokens=snake_case ,q_tokens=snake_case ,num_buckets=snake_case ,pca_max_data=snake_case ,kmeans_explained_var=snake_case ,kmeans_num_redo=snake_case ,kmeans_max_iter=snake_case ,featurize_model_name=snake_case ,device_id=snake_case ,max_text_length=snake_case ,divergence_curve_discretization_size=snake_case ,mauve_scaling_factor=snake_case ,verbose=snake_case ,seed=snake_case ,)
return out
| 334 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__="" ) -> str:
__lowerCamelCase : str = tempfile.mkdtemp()
return os.path.join(lowerCamelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : int = torch.rand(1_2 ,dtype=torch.floataa) - 0.5
__lowerCamelCase : Dict = AgentAudio(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,agent_type.to_raw() ,atol=1E-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__))
# Ensure that the file contains the same value as the original tensor
__lowerCamelCase , __lowerCamelCase : Optional[Any] = sf.read(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,torch.tensor(SCREAMING_SNAKE_CASE__) ,atol=1E-4))
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Dict = torch.rand(1_2 ,dtype=torch.floataa) - 0.5
__lowerCamelCase : Optional[Any] = get_new_path(suffix='.wav')
sf.write(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,1_6_0_0_0)
__lowerCamelCase : Optional[Any] = AgentAudio(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,agent_type.to_raw() ,atol=1E-4))
self.assertEqual(agent_type.to_string() ,SCREAMING_SNAKE_CASE__)
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Union[str, Any] = torch.randint(0 ,2_5_6 ,(6_4, 6_4, 3))
__lowerCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,agent_type._tensor ,atol=1E-4))
self.assertIsInstance(agent_type.to_raw() ,Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__))
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Dict = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
__lowerCamelCase : Optional[Any] = Image.open(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = AgentImage(SCREAMING_SNAKE_CASE__)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__))
def lowerCAmelCase ( self : Any):
__lowerCamelCase : int = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
__lowerCamelCase : List[Any] = Image.open(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = AgentImage(SCREAMING_SNAKE_CASE__)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__))
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : str):
__lowerCamelCase : Any = 'Hey!'
__lowerCamelCase : List[str] = AgentText(SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,agent_type.to_string())
self.assertEqual(SCREAMING_SNAKE_CASE__ ,agent_type.to_raw())
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'vit_mae'
def __init__( self : Union[str, Any] ,snake_case : Any=768 ,snake_case : List[str]=12 ,snake_case : Optional[int]=12 ,snake_case : int=3072 ,snake_case : List[Any]="gelu" ,snake_case : str=0.0 ,snake_case : str=0.0 ,snake_case : Optional[Any]=0.02 ,snake_case : Dict=1e-12 ,snake_case : List[str]=224 ,snake_case : Any=16 ,snake_case : Any=3 ,snake_case : Tuple=True ,snake_case : List[Any]=16 ,snake_case : List[str]=512 ,snake_case : List[Any]=8 ,snake_case : Dict=2048 ,snake_case : Union[str, Any]=0.75 ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =qkv_bias
SCREAMING_SNAKE_CASE =decoder_num_attention_heads
SCREAMING_SNAKE_CASE =decoder_hidden_size
SCREAMING_SNAKE_CASE =decoder_num_hidden_layers
SCREAMING_SNAKE_CASE =decoder_intermediate_size
SCREAMING_SNAKE_CASE =mask_ratio
SCREAMING_SNAKE_CASE =norm_pix_loss
| 334 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
_lowercase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _snake_case ( snake_case__ : str , snake_case__ : Dict=100 , snake_case__ : int=" " ):
A = text.split(snake_case__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case__ ) , snake_case__ )]
def _snake_case ( snake_case__ : dict ):
A , A = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(snake_case__ ):
titles.append(title if title is not None else '' )
texts.append(snake_case__ )
return {"title": titles, "text": texts}
def _snake_case ( snake_case__ : dict , snake_case__ : DPRContextEncoder , snake_case__ : DPRContextEncoderTokenizerFast ):
A = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=snake_case__ , padding='longest' , return_tensors='pt' )['input_ids']
A = ctx_encoder(input_ids.to(device=snake_case__ ) , return_dict=snake_case__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _snake_case ( snake_case__ : "RagExampleArguments" , snake_case__ : "ProcessingArguments" , snake_case__ : "IndexHnswArguments" , ):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
A = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
A = dataset.map(snake_case__ , batched=snake_case__ , num_proc=processing_args.num_proc )
# And compute the embeddings
A = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case__ )
A = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
A = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
A = dataset.map(
partial(snake_case__ , ctx_encoder=snake_case__ , ctx_tokenizer=snake_case__ ) , batched=snake_case__ , batch_size=processing_args.batch_size , features=snake_case__ , )
# And finally save your dataset
A = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(snake_case__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
A = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=snake_case__ )
# And save the index
A = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(snake_case__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = field(
default=str(Path(_lowercase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
_lowerCamelCase: str = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
_lowerCamelCase: str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
_lowerCamelCase: Optional[str] = field(
default=str(Path(_lowercase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: Optional[int] = field(
default=_lowercase , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
_lowerCamelCase: int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
_lowerCamelCase: int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
_lowercase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
_lowercase , _lowercase , _lowercase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
_lowercase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a_ ( __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =[2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ =True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ =True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ =True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ =[3, 3, 3, 3]
lowerCamelCase_ =[5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ =[4, 4, 4, 4]
lowerCamelCase_ =[3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ =[3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ =[3, 3, 3, 3]
else:
lowerCamelCase_ =[2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ =96
elif "small" in model_name:
lowerCamelCase_ =96
elif "base" in model_name:
lowerCamelCase_ =128
elif "large" in model_name:
lowerCamelCase_ =192
elif "xlarge" in model_name:
lowerCamelCase_ =256
elif "huge" in model_name:
lowerCamelCase_ =352
# set label information
lowerCamelCase_ ='''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ ='''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ ='''imagenet-1k-id2label.json'''
lowerCamelCase_ =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ ={int(__snake_case ): v for k, v in idalabel.items()}
lowerCamelCase_ ={v: k for k, v in idalabel.items()}
lowerCamelCase_ =FocalNetConfig(
embed_dim=__snake_case , depths=__snake_case , focal_levels=__snake_case , focal_windows=__snake_case , use_conv_embed=__snake_case , idalabel=__snake_case , labelaid=__snake_case , use_post_layernorm=__snake_case , use_layerscale=__snake_case , )
return config
def a_ ( __snake_case : Any ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCamelCase_ =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ ='''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ =name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ =name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ =name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ =name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ =name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ =name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ ='''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ ='''layernorm.bias'''
if "head" in name:
lowerCamelCase_ =name.replace('''head''' , '''classifier''' )
else:
lowerCamelCase_ ='''focalnet.''' + name
return name
def a_ ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : int=False ) -> Optional[int]:
"""simple docstring"""
# fmt: off
lowerCamelCase_ ={
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ =model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __snake_case )
lowerCamelCase_ =torch.hub.load_state_dict_from_url(__snake_case , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ =state_dict.pop(__snake_case )
lowerCamelCase_ =val
lowerCamelCase_ =get_focalnet_config(__snake_case )
lowerCamelCase_ =FocalNetForImageClassification(__snake_case )
model.eval()
# load state dict
model.load_state_dict(__snake_case )
# verify conversion
lowerCamelCase_ ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ =BitImageProcessor(
do_resize=__snake_case , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__snake_case , crop_size=224 , do_normalize=__snake_case , image_mean=__snake_case , image_std=__snake_case , )
lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
lowerCamelCase_ =processor(images=__snake_case , return_tensors='''pt''' )
lowerCamelCase_ =transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowerCamelCase_ =image_transforms(__snake_case ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __snake_case , atol=1e-4 )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ =torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ =torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowerCamelCase_ =torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ =torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowerCamelCase_ =torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ =torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
a_ : int = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_, pa.Buffer ) else pa.memory_map(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lst[0], lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) | 76 |
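# --- Usage sketch (added; not part of the test file above) ---
# Exercising BlipProcessor outside the test harness. The checkpoint name is
# one plausible public BLIP model, an assumption rather than something the
# tests above prescribe.
from PIL import Image
from transformers import BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
demo_image = Image.new("RGB", (384, 384))
demo_inputs = blip_processor(images=demo_image, text="a photo of", return_tensors="pt")
print(sorted(demo_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']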
def or_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
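# --- Illustrative sketch (added): the tuple-count trick above generalizes ---
# to an n-input OR gate; `or_gate_n` is our name, not the source's.
def or_gate_n(*inputs: int) -> int:
    return int(inputs.count(1) != 0)

assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 1, 0) == 1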
| 334 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 1_1],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=2_5_6,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=2_5_5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
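# --- Usage sketch (added): how an OnnxConfig like the one above is consumed.
# The transformers.onnx export API and the checkpoint choice are assumptions
# based on transformers v4.x; neither is stated in this file.
from pathlib import Path
from transformers import AutoImageProcessor, AutoModel
from transformers.onnx import export

d2v_model = AutoModel.from_pretrained("facebook/data2vec-vision-base-ft")
d2v_preprocessor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft")
d2v_onnx_config = Data2VecVisionOnnxConfig(d2v_model.config)
onnx_inputs, onnx_outputs = export(
    d2v_preprocessor, d2v_model, d2v_onnx_config, d2v_onnx_config.default_onnx_opset, Path("model.onnx")
)
# atol_for_validation (1e-4 above) is the tolerance used when the exported
# graph is validated against the PyTorch outputs.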
| 77 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    """simple docstring"""
    with open(vocab_file, 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token )

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token )

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(vocab_file, 'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False):
        return super()._add_tokens(new_tokens, special_tokens=True )
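# --- Usage sketch (added): exercising the tokenizer defined above with a tiny
# inline vocabulary. The token list is a made-up subset of the real ESM-2
# vocabulary, chosen only so the example is self-contained. Per-residue
# splitting of "LAGV" relies on every vocab entry being a no-split token, as
# set up by `_create_trie` in `__init__` above.
if __name__ == "__main__":
    import tempfile

    demo_tokens = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "<mask>"]
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_vocab = os.path.join(tmp_dir, "vocab.txt")
        with open(demo_vocab, "w") as f:
            f.write("\n".join(demo_tokens))
        demo_tokenizer = EsmTokenizer(demo_vocab)
        print(demo_tokenizer("LAGV")["input_ids"])  # [cls] L A G V [eos]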
| 334 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""

    def __init__(
        self,
        vocab_size=2_11_28,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
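# --- Illustrative sketch (added): what `max_relative_position` controls ---
# NEZHA uses functional relative position encodings; pairwise distances
# between token positions are clipped to [-max_relative_position,
# max_relative_position]. This toy computes the clipped distance matrix.
import torch

def clipped_relative_positions(seq_len: int, max_relative_position: int) -> torch.Tensor:
    positions = torch.arange(seq_len)
    distance = positions[None, :] - positions[:, None]
    return distance.clamp(-max_relative_position, max_relative_position)

print(clipped_relative_positions(5, 2))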
| 78 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m, nn.Conv2d ) or isinstance(m, nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [handle.remove() for handle in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class ModuleTransfer:
    """simple docstring"""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m: type(m ) not in self.src_skip, src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip, dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced, src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True ):
    """simple docstring"""
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
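# --- Minimal sketch (added) of the hook-based transfer idea used above ---
# Two architecturally identical stacks are traced with Tracker's forward
# hooks; leaf modules are then paired in execution order and the weights
# copied across, which is exactly what ModuleTransfer automates.
src_stack = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
dest_stack = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
demo_x = torch.randn(1, 3, 8, 8)
for src_m, dest_m in zip(Tracker(src_stack)(demo_x).parametrized, Tracker(dest_stack)(demo_x).parametrized):
    dest_m.load_state_dict(src_m.state_dict())
assert torch.allclose(src_stack(demo_x), dest_stack(demo_x))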
| 334 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase_ = get_tests_dir('''fixtures''')
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = mock.Mock()
_A = 500
_A = {}
_A = HTTPError
_A = {}
# Download this model to make sure it's in the cache.
_A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCAmelCase ) as mock_head:
_A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : str ):
'''simple docstring'''
_A = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def lowerCAmelCase ( cls : Any ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(__UpperCAmelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCAmelCase , repo_id="test-feature-extractor" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(__UpperCAmelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCAmelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
_A = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_A = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 79 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports(artifact_names, output_dir, token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
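# --- Usage sketch (added): consuming the helpers above ---
# The token comes from the environment in CI; the artifact name is a
# hypothetical placeholder, since real names depend on the workflow definition.
import os as _os

_os.makedirs("ci_artifacts", exist_ok=True)
reports = get_last_daily_ci_reports(
    artifact_names=["ci_results_run_models_gpu"],  # hypothetical artifact name
    output_dir="ci_artifacts",
    token=_os.environ.get("GITHUB_TOKEN"),
)
for artifact, files in reports.items():
    for report_name, content in files.items():
        print(artifact, report_name, len(content), "chars")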
| 334 | 0 |
'''simple docstring'''
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
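# --- Sketch (added): how a pin table like `deps` above is typically consumed
# in a setup.py. transformers' own setup builds a helper much like `deps_list`
# from an identical table; treat the exact helper shape as an assumption here.
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list("numpy", "packaging", "pyyaml", "requests", "tqdm")
extras = {"torch": deps_list("torch"), "vision": deps_list("Pillow")}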
| 80 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
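# --- Usage sketch (added): the integration check above, end to end ---
# with a tokenizer instead of hand-written ids. The label mapping comes from
# the checkpoint's own config; the premise/hypothesis pair is our example.
import torch
from transformers import AutoTokenizer, SqueezeBertForSequenceClassification

sb_tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
sb_model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
sb_inputs = sb_tokenizer(
    "A soccer game with multiple males playing.", "Some men are playing a sport.", return_tensors="pt"
)
with torch.no_grad():
    sb_logits = sb_model(**sb_inputs).logits
print(sb_model.config.id2label[sb_logits.argmax(-1).item()])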
| 334 | 0 |
"""simple docstring"""
def perfect(number: int) -> bool:
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.') | 81 |
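# --- Sketch (added): the same perfect-number check in O(sqrt(n)) ---
# Proper divisors come in pairs (i, n // i), so scanning up to sqrt(n)
# suffices; `perfect_fast` is our name, not the source's.
def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1
    i = 2
    while i * i <= number:
        if number % i == 0:
            total += i + (number // i if i != number // i else 0)
        i += 1
    return total == number

assert perfect_fast(28) and not perfect_fast(27)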
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
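# --- Illustrative sketch (added): the `pad` semantics tested above, on a ---
# concrete extractor. Wav2Vec2's feature extractor is one stand-in with
# feature_size=1 (an assumption; the mixin above is extractor-agnostic).
from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = {"input_values": [[0.1] * 5, [0.2] * 8]}
longest = fe.pad(batch, padding="longest", return_tensors="np")["input_values"]  # shape (2, 8)
fixed = fe.pad(batch, padding="max_length", max_length=12, return_tensors="np")["input_values"]  # shape (2, 12)
multiple = fe.pad(batch, padding="longest", pad_to_multiple_of=10, return_tensors="np")["input_values"]  # shape (2, 10)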
| 334 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = AudioLDMPipeline
__lowerCamelCase = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
_lowerCAmelCase = SpeechTaHifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
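        # For reference, with the 16 kHz dummy vocoder above: 0.016 s -> 256
        # samples and 0.032 s -> 512 samples of waveform.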
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = ["""hey"""]
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_lowerCAmelCase = SpeechTaHifiGan(_snake_case ).to(_snake_case )
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
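

if __name__ == "__main__":
    # Stand-alone usage sketch, not part of the test suite above. Assumptions:
    # network access to download the `cvssp/audioldm` checkpoint, a CUDA
    # device, and `scipy` installed for writing the waveform to disk.
    import scipy.io.wavfile

    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    pipe = pipe.to("cuda")
    waveform = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    # AudioLDM's vocoder produces 16 kHz mono audio.
    scipy.io.wavfile.write("hammer.wav", rate=16000, data=waveform)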
| 82 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by counting the characters that match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score:
    # e.g. a normalized score of 0.05 yields int(0.05 * 100) + 1 = 6 children.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
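    # A second, quicker run on a short target as a sanity check. Seeding here
    # is an illustrative choice that makes the small run reproducible.
    random.seed(42)
    generation, population, target = basic("hello world", list("helo wrd"), debug=False)
    print(f"\nShort run -> generation: {generation}, evaluated: {population}, result: {target!r}")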
| 334 | 0 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 83 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
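

if __name__ == "__main__":
    # Stand-alone illustration of the longest-match splitting exercised by the
    # tests above, using the `Trie` imported at the top of this file.
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_100")
    print(trie.split("[CLS] This is a extra_id_100"))
    # -> ['[CLS]', ' This is a ', 'extra_id_100']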
| 334 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
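

if __name__ == "__main__":
    # Minimal sketch of the lazy-import idea used above. `ToyLazyModule` is an
    # illustrative assumption, far simpler than the real `_LazyModule`, which
    # also handles `dir()`, pickling and error reporting. The final print
    # assumes `transformers` (with the MRA model) is importable.
    import importlib

    class ToyLazyModule:
        def __init__(self, package: str, import_structure: dict):
            self._package = package
            # Map every exported symbol to the submodule that defines it.
            self._where = {sym: mod for mod, syms in import_structure.items() for sym in syms}

        def __getattr__(self, symbol: str):
            # The submodule is imported only on first attribute access.
            submodule = importlib.import_module(f".{self._where[symbol]}", self._package)
            return getattr(submodule, symbol)

    lazy = ToyLazyModule("transformers.models.mra", {"configuration_mra": ["MraConfig"]})
    print(lazy.MraConfig)  # the configuration submodule is imported only here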
| 84 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load the raw checkpoint state dict from disk onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename the keys of the original state dict to the VisualBERT layout."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
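

def _demo_rename_keys():
    """Tiny illustration of the key renaming performed by `get_new_dict`
    (toy tensor and a hypothetical key name, not a real checkpoint)."""
    toy_sd = OrderedDict({"bert.bert.encoder.layer.0.output.dense.weight": torch.zeros(2, 2)})
    toy_config = VisualBertConfig(visual_embedding_dim=512)
    renamed = get_new_dict(toy_sd, toy_config)
    # 'bert.bert' is rewritten to 'visual_bert' by the rename_keys_prefix pairs.
    assert "visual_bert.encoder.layer.0.output.dense.weight" in renamed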
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334 | 0 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list representation of a graph, directed by default."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex and destination_vertex, creating either vertex
        on the fly when it is not in the adjacency list yet."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
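

if __name__ == "__main__":
    # Small usage demonstration with illustrative vertices.
    undirected = GraphAdjacencyList[int](directed=False)
    undirected.add_edge(1, 2).add_edge(2, 3)
    print(undirected)  # {1: [2], 2: [1, 3], 3: [2]}

    directed = GraphAdjacencyList[str]()
    directed.add_edge("a", "b").add_edge("b", "c")
    print(directed)  # {'a': ['b'], 'b': ['c'], 'c': []}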
| 85 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class to store the configuration of an NLLB-MoE model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
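

if __name__ == "__main__":
    # Quick illustrative check: a deliberately tiny configuration, reading
    # `hidden_size` through the `attribute_map` alias declared above.
    config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4)
    print(config.hidden_size, config.num_experts)  # 256 4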
| 334 | 0 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = """Hello world! cécé herlolip"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = FairseqRobertaModel.from_pretrained(_UpperCamelCase )
roberta.eval() # disable dropout
__lowerCAmelCase : Any = roberta.model.encoder.sentence_encoder
__lowerCAmelCase : Optional[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _UpperCamelCase )
__lowerCAmelCase : Tuple = XLMRobertaXLForSequenceClassification(_UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCAmelCase : Optional[Any] = roberta_sent_encoder.embed_tokens.weight
__lowerCAmelCase : str = roberta_sent_encoder.embed_positions.weight
__lowerCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowerCAmelCase : int = roberta_sent_encoder.layer_norm.weight
__lowerCAmelCase : Optional[int] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
__lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
__lowerCAmelCase : RobertaAttention = layer.attention
__lowerCAmelCase : int = roberta_layer.self_attn_layer_norm.weight
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.q_proj.weight
__lowerCAmelCase : Dict = roberta_layer.self_attn.q_proj.bias
__lowerCAmelCase : int = roberta_layer.self_attn.k_proj.weight
__lowerCAmelCase : Any = roberta_layer.self_attn.k_proj.bias
__lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.weight
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
__lowerCAmelCase : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowerCAmelCase : str = roberta_layer.final_layer_norm.weight
__lowerCAmelCase : str = roberta_layer.final_layer_norm.bias
# intermediate
__lowerCAmelCase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase : str = roberta_layer.fca.weight
__lowerCAmelCase : Optional[int] = roberta_layer.fca.bias
# output
__lowerCAmelCase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase : Tuple = roberta_layer.fca.weight
__lowerCAmelCase : Union[str, Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
__lowerCAmelCase : List[str] = roberta.model.classification_heads['mnli'].dense.weight
__lowerCAmelCase : Tuple = roberta.model.classification_heads['mnli'].dense.bias
__lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.weight
__lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowerCAmelCase : int = roberta.model.encoder.lm_head.dense.weight
__lowerCAmelCase : int = roberta.model.encoder.lm_head.dense.bias
__lowerCAmelCase : Optional[int] = roberta.model.encoder.lm_head.layer_norm.weight
__lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
__lowerCAmelCase : Any = roberta.model.encoder.lm_head.weight
__lowerCAmelCase : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCAmelCase : torch.Tensor = roberta.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1
__lowerCAmelCase : Any = model(_UpperCamelCase )[0]
if classification_head:
__lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(_UpperCamelCase ) )
else:
__lowerCAmelCase : List[Any] = roberta.model(_UpperCamelCase )[0]
print(our_output.shape , their_output.shape )
__lowerCAmelCase : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__lowerCAmelCase : Optional[Any] = torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCamelCase__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
 | 86 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Find the median of the merged sorted arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 334 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
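

if __name__ == "__main__":
    # Illustrative smoke test (assumption: a random uint8 array is accepted in
    # place of a real PIL image). Resizing targets the shortest edge, then the
    # 256x256 center crop pads or crops each side as needed.
    rng = np.random.default_rng(0)
    dummy_image = rng.integers(0, 256, size=(300, 400, 3), dtype=np.uint8)
    processor = MobileViTImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 256, 256)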
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
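

if __name__ == "__main__":
    # Illustrative check of the `attribute_map` aliases and of the
    # unlimited-context `max_position_embeddings` property defined above.
    config = TransfoXLConfig(d_model=512, n_head=8)
    print(config.hidden_size, config.num_attention_heads)  # 512 8
    print(config.max_position_embeddings)  # -1: no sequence length limit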
| 334 | 0 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 88 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""
    def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
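        # Worked example (added): with the defaults above, image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length = 226.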
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm( self, config, pixel_values, labels, pixel_labels ):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
    def setUp( self ):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 334 | 0 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __magic_name__ :
pass
| 89 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook( self, m, inputs: Tensor, outputs: Tensor ):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__( self, x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """simple docstring"""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__( self, x: Tensor ):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
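# Minimal self-contained sketch of the transfer utilities above (added for
# illustration; the toy networks are hypothetical, not part of the script).
# It traces both networks with the same input and copies weights leaf-by-leaf:
def _module_transfer_demo():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest, verbose=0)(torch.randn(1, 3, 8, 8))
    assert torch.allclose(src[0].weight, dest[0].weight)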
class FakeRegNetVisslWrapper( nn.Module ):
    """simple docstring"""

    def __init__( self, model: nn.Module ):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block'), f'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward( self, x: Tensor ):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap( dict ):
    """simple docstring"""

    def convert_name_to_timm( self, x: str ):
        x_split = x.split('-')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__( self, x: str ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
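# Worked example of the fallback above (added): a key like 'regnet-y-040' is
# not in the map, so it is rewritten to the timm id 'regnety_040'
# ('regnet' + 'y' + '_' + '040') and the original weights come from timm.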
class NameToOurModelFuncMap( dict ):
    """simple docstring"""

    def __getitem__( self, x: str ):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict, to_state_dict, keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}')
    return to_state_dict
def convert_weight_and_push( name, from_model_func, our_model_func, config, save_directory, push_to_hub = True, ):
    """simple docstring"""
    print(F'Converting {name}...')
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=True, )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {name}')
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained (map keys reconstructed from the checkpoint URLs and the
    # config names above)
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')

    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__( self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images( self, images, overflow_to_sample_mapping ):
        '''simple docstring'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")
        return images_with_overflow

    def batch_decode( self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode( self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
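# Usage sketch (added for illustration; the checkpoint id and the `image`
# variable are assumptions, not part of the original module):
#
#   processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
#   encoding = processor(image, return_tensors='pt')  # OCR runs inside the image processor
#   encoding.keys()  # input_ids, bbox, attention_mask, image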
| 90 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
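# Hedged sketch of the "New Code" idea below (added; a simplified restatement,
# not the library implementation): `find_executable_batch_size` re-runs the
# decorated function with a halved batch size each time it raises an
# out-of-memory error.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner(batch_size):
#       ...  # OOM at 128 -> retried with 64, then 32, ...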
_lowerCamelCase =16
_lowerCamelCase =32
def get_dataloaders( accelerator, batch_size = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase =mocked_dataloaders # noqa: F811
def training_function( config, args ):
    """simple docstring"""
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
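# Example invocations (added; the file name `memory.py` is an assumption based
# on the upstream accelerate examples folder):
#
#   python memory.py --mixed_precision fp16
#   accelerate launch memory.py --cpu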
| 334 | 0 |
"""simple docstring"""
def sum_of_divisors(input_num: int ) -> int:
    """simple docstring"""
    # Renamed from the garbled `_A`; the original body referenced `input_num`
    # while the parameter was `__a`, and the isinstance check compared the
    # value against itself instead of against `int`.
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
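# Worked example (added): the proper divisors of 28 are 1, 2, 4, 7 and 14,
# which sum to 28, so sum_of_divisors(28) == 28 (28 is a perfect number).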
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 |
def reverse_long_words( sentence ):
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
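# Worked example (added): words longer than four characters are reversed, so
# "Hey wollef sroirraw" becomes "Hey fellow warriors".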
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
from __future__ import annotations
def check_polygon( nums: list[float] ) -> bool:
    # Renamed from the garbled `_a`; the original body mixed three different
    # names for the single parameter.
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
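# Worked example (added): check_polygon([6, 10, 5]) is True because the longest
# side (10) is shorter than 6 + 5; check_polygon([12, 3, 4]) is False (12 >= 7).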
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro( TestCasePlus ):
    """simple docstring"""

    def setUp( self ):
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz', extract_compressed_file=True, )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
    """simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script( self ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334 | 0 |
'''simple docstring'''
def solution( n: int = 1000 ):
    """simple docstring"""
    # Renamed from the garbled `snake_case_` to match the call in __main__.
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
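# Constant-time alternative (added sketch) using inclusion-exclusion:
# sum multiples of 3 plus multiples of 5, minus multiples of 15.
def _sum_multiples_below(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2
# For n = 1000: 166833 + 99500 - 33165 == 233168 == solution(1000)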
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_vision_model'
    def __init__( self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_qformer'
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip-2'
__UpperCAmelCase = True
def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ):
super().__init__(**snake_case )
if vision_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case )
SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case )
SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt'
SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case )
SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE =num_query_tokens
SCREAMING_SNAKE_CASE =self.vision_config.hidden_size
SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE =1.0
SCREAMING_SNAKE_CASE =0.02
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,)
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
SCREAMING_SNAKE_CASE =self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE =self.text_config.to_dict()
SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
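# Hedged usage sketch for the composite config above: build the top-level config
# from explicitly constructed sub-configs via the classmethod. `CONFIG_MAPPING['opt']`
# is already imported above; the tiny layer counts are illustrative choices only.
vision_config = Blip2VisionConfig(num_hidden_layers=2)
qformer_config = Blip2QFormerConfig(num_hidden_layers=2)
text_config = CONFIG_MAPPING['opt'](num_hidden_layers=2)
blip2_config = Blip2Config.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config, num_query_tokens=8
)
# the Q-Former's cross-attention width is tied to the vision encoder's hidden size
assert blip2_config.qformer_config.encoder_hidden_size == blip2_config.vision_config.hidden_size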
| 334 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Tuple = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = 'biogpt'
    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
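# Hedged usage sketch for the config class above: instantiating it with no
# arguments yields the documented defaults, and overrides are plain keyword
# arguments. These literals are illustrative, not part of the original snippet.
config = BioGptConfig(num_hidden_layers=6)   # a smaller variant for quick tests
assert config.model_type == "biogpt"
assert config.hidden_size == 1024 and config.num_hidden_layers == 6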
| 94 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed )
        return out
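# Hedged sketch of calling the metric directly through `compute_mauve` (imported
# above). The texts are toy placeholders; `device_id=-1` keeps featurization on
# CPU, and the default featurizer downloads gpt2-large on first use.
predictions = ["the cat sat on the mat", "dogs chase cats"]
references = ["a cat sat on a mat", "dogs often chase cats"]
result = compute_mauve(p_text=predictions, q_text=references, device_id=-1, verbose=False)
print(result.mauve)   # scalar in (0, 1]; higher means the two text distributions are closer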
| 334 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
a__ : List[str] =self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : List[Any] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ : List[str] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
a__ : Optional[int] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : Dict =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
a__ , a__ : List[str] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : int =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : List[Any] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
a__ , a__ : Tuple =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : int =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a__ : Optional[int] =json.loads(f.read() )
a__ : Any ={"image_id": 3_9_7_6_9, "annotations": target}
# encode them
a__ : Union[str, Any] =DeformableDetrImageProcessor()
a__ : Tuple =image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
a__ : Tuple =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
a__ : Any =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
a__ : List[Any] =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
a__ : Any =torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
a__ : Optional[Any] =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
a__ : int =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
a__ : str =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
a__ : List[str] =torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
a__ : Optional[int] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
a__ : int =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[str] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a__ : int =json.loads(f.read() )
a__ : List[str] ={"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
a__ : List[Any] =pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a__ : Any =DeformableDetrImageProcessor(format="coco_panoptic" )
a__ : Optional[Any] =image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
a__ : Union[str, Any] =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
a__ : Union[str, Any] =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
a__ : Dict =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
a__ : Optional[int] =torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
a__ : Any =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
a__ : int =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
a__ : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
a__ : Optional[Any] =torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
a__ : int =8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
a__ : List[str] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
a__ : int =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 95 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'vit_mae'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
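# Hedged sketch of what `mask_ratio` implies for the config above: with the default
# 224x224 images and 16x16 patches there are 196 patches, and a 0.75 mask ratio
# leaves only 49 visible to the encoder. Pure arithmetic on the fields defined here.
config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2
num_masked = int(config.mask_ratio * num_patches)
assert (num_patches, num_masked, num_patches - num_masked) == (196, 147, 49)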
| 334 | 0 |
"""simple docstring"""
import socket
def _snake_case ( ):
_lowerCamelCase : List[Any] = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_lowerCamelCase : Union[str, Any] = socket.gethostname()
_lowerCamelCase : List[Any] = 12312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
_lowerCamelCase : int = sock.recv(1024 )
if not data:
break
out_file.write(lowercase__ )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
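# Hedged sketch of a matching sender for the receiving client above; the host,
# port, and source filename are assumptions chosen to pair with the snippet,
# not part of the original code.
def serve_file(filename='mytext.txt', port=12312):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)                # wait for the single receiving client
    conn, _ = server.accept()
    print(conn.recv(1024))          # the client's greeting
    with open(filename, 'rb') as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)        # stream the file in 1 KiB chunks
    conn.close()
    server.close()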
if __name__ == "__main__":
    main()
| 96 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
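# Minimal sketch of the lazy-module pattern used above, with hypothetical names:
# attribute access triggers the real import, so heavy backends (torch, TF) are
# only loaded when actually used. This illustrates the idea, not the library code.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)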
| 334 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
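# Quick sanity checks for the trial-division routine above (names follow the fix;
# the literals are illustrative, not part of the original snippet):
assert prime_factors(12) == [2, 2, 3]
assert prime_factors(97) == [97]   # a prime is its own only factor
assert prime_factors(1) == []      # 1 has no prime factors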
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 97 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output( output, expected_num_chunks ):
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output, pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def get_base_dtype( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list( lst, value ):
    """simple docstring"""
    if isinstance(lst[0], list ):
        change_first_primitive_element_in_list(lst[0], value )
    else:
        lst[0] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial( num: int ) -> int:
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
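# Hedged usage note: @lru_cache memoizes every computed value, so the recursion
# behind each call is paid only once; these literals are illustrative additions.
assert factorial(5) == 120
assert factorial(0) == 1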
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 |
def or_gate( input_a: int, input_b: int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ):
"""simple docstring"""
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    def get_masked_lm_array(name):
        full_name = f'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_array(name):
        full_name = f'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_layer_array(layer_index, name):
        full_name = f'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_attention_layer_array(layer_index, name, original_shape):
        full_name = f'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    print(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, '_query_dense/kernel', self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, '_query_dense/bias', self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, '_key_dense/kernel', self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, '_key_dense/bias', self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, '_value_dense/kernel', self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, '_value_dense/bias', self_attn.value.bias.data.shape)
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, '_output_dense/kernel', self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, '_output_dense/bias', self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/gamma')
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/beta')
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, '_intermediate_dense/kernel')
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, '_intermediate_dense/bias')
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, '_output_dense/kernel')
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, '_output_dense/bias')
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_output_layer_norm/gamma')
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_output_layer_norm/beta')
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('_position_embedding_layer/embeddings')
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('_type_embedding_layer/embeddings')
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('_embedding_norm_layer/gamma')
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('_embedding_norm_layer/beta')
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array('dense/kernel')
    lm_head.dense.bias.data = get_masked_lm_array('dense/bias')
    lm_head.LayerNorm.weight.data = get_masked_lm_array('layer_norm/gamma')
    lm_head.LayerNorm.bias.data = get_masked_lm_array('layer_norm/beta')
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('embedding_table')
    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array('_pooler_layer/kernel')
    model.bert.pooler.dense.bias.data = get_encoder_array('_pooler_layer/bias')
    # Export final model
    model.save_pretrained(pytorch_dump_path)
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print('Model conversion was done successfully!')
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowercase : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
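# Hedged example invocation for the script above; the script name and all paths
# are hypothetical placeholders, not values from the original snippet:
#   python convert_token_dropping_bert.py \
#       --tf_checkpoint_path ./tf2_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model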
| 99 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file( vocab_file ):
    """simple docstring"""
    with open(vocab_file, 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index: int ):
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token: str ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ):
        return len(self._id_to_token )
    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token: str ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index: int ):
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_a: List , token_ids_b: Optional[List] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix: Optional[str] = None ):
        vocab_file = os.path.join(save_directory , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(vocab_file , 'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens: Union[List[str], List[AddedToken]] , special_tokens: bool = False ):
        return super()._add_tokens(new_tokens , special_tokens=True )
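# Hedged usage sketch for the tokenizer above: ESM tokenizes proteins one residue
# per token via whitespace splitting, so a plain split round-trips through the
# vocab maps built in __init__. The toy vocabulary below is a hypothetical stand-in
# for the real ESM vocab file, purely for illustration.
toy_vocab = ['<cls>', '<pad>', '<eos>', '<unk>', 'L', 'A', 'G', 'V', '<mask>']
token_to_id = {tok: i for i, tok in enumerate(toy_vocab)}
sequence = 'G A V L'
ids = [token_to_id.get(tok, token_to_id['<unk>']) for tok in sequence.split()]
assert ids == [6, 5, 7, 4]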
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """simple docstring"""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3)
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ):
if attention_mask is None:
__SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
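# Editor's hedged note: the helper above follows the usual TF seq2seq test setup.
# For example, with pad_token_id=0 and decoder_input_ids [[2, 7, 0]], the decoder
# mask comes out as [[1, 1, 0]]: position 0 (the forced decoder start token) is
# always kept visible, and padding is masked everywhere else.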
@require_tf
class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    src_text = ['''My friends are cool but they eat too many carbs.''']
    model_name = '''facebook/blenderbot-400M-distill'''
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
@slow
def snake_case_ ( self):
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 100 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker :
"""simple docstring"""
    module : nn.Module
    traced : list = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self : Any ,m : Any ,inputs : Tensor ,outputs : Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : int ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized( self : Tuple ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class ModuleTransfer :
"""simple docstring"""
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : list = field(default_factory=list )
    dest_skip : list = field(default_factory=list )
    def __call__( self : int ,x : Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push( name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
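# Editor's hedged sketch (not part of the original script): Tracker records the
# leaf modules hit during a forward pass, and ModuleTransfer pairs them up in
# order to copy weights between two architecturally equivalent networks, e.g.:
#
#   src = timm.create_model("resnet18", pretrained=True).eval()
#   dest = ResNetForImageClassification(ResNetConfig()).eval()  # matching config assumed
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 224, 224))
#
# After the call, dest holds src's weights, module by module.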
| 334 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler ( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self ,num_train_timesteps=2_0_0_0 ,beta_min=0.1 ,beta_max=2_0 ,sampling_eps=1E-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self ,num_inference_steps ,device = None):
        self.timesteps = torch.linspace(1 ,self.config.sampling_eps ,num_inference_steps ,device=device)
    def step_pred( self ,score ,x ,t ,generator=None):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape ,layout=x.layout ,generator=generator ,device=x.device ,dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
    def __len__( self):
        return self.config.num_train_timesteps
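# Editor's hedged usage sketch (commented out; not in the original file). The
# reverse VP-SDE is driven from t=1 down to sampling_eps; `score_model` below is
# a hypothetical stand-in for a trained score network with an (x, t) -> score
# signature:
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)
#   # x_mean is the noise-free sample estimate at the final step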
| 101 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token, num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names, output_dir, token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports( artifact_names, output_dir, token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
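# Editor's hedged usage sketch: pull the latest scheduled-CI reports into a
# local folder. The artifact name below is illustrative only; real names depend
# on the workflow definition.
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )
#   # reports maps artifact name -> {filename: decoded file contents}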
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowercase ( _snake_case : str = "" ) ->dict[str, float]:
"""simple docstring"""
__snake_case : List[Any] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
__snake_case : Dict = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
__snake_case : Union[str, Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
__snake_case : int = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_snake_case , _snake_case )
}
def lowercase ( _snake_case : str = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
__snake_case : List[Any] = get_imdb_top_aaa_movies()
with open(_snake_case , '''w''' , newline='''''' ) as out_file:
__snake_case : Dict = csv.writer(_snake_case )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
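    # Editor's hedged note: get_imdb_top_250_movies() returns a dict shaped like
    # {"The Shawshank Redemption": 9.2, "The Godfather": 9.2, ...}; the exact
    # titles and ratings depend on IMDb at scrape time.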
| 102 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester :
"""simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=64 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,q_groups=2 ,k_groups=2 ,v_groups=2 ,post_attention_groups=2 ,intermediate_groups=4 ,output_groups=1 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self : Tuple ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
    def create_and_check_squeezebert_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,start_positions=sequence_labels ,end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self : List[str] ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=SqueezeBertConfig ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
    def _lowerCAmelCase ( self : Optional[int] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def _lowerCAmelCase ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def _lowerCAmelCase ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def _lowerCAmelCase ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def _lowerCAmelCase ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def _lowerCAmelCase ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _lowerCAmelCase ( self : Any ):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape ,expected_shape )
        expected_tensor = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
        self.assertTrue(torch.allclose(output ,expected_tensor ,atol=1e-4 ) )
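# Editor's hedged note: the q/k/v, post-attention, intermediate and output
# "groups" exercised by the tester above are SqueezeBERT's grouped-convolution
# factors; the model replaces most dense layers with grouped 1D convolutions,
# so each group count must evenly divide the corresponding channel dimension.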
| 334 | 0 |
def solution( pence : int = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1 # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin ,pence + 1 ,1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
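    # Editor's worked example: only the 1p and 2p coins can contribute to 4p,
    # and the ways are 1+1+1+1, 1+1+2 and 2+2, so the table ends at 3.
    assert solution(4) == 3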
| 103 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
        def _inputs_have_equal_length(input : Dict ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 : str ,input_2 : Dict ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 ,input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) ,np.asarray(input_slice_2 ) ,atol=1e-3 ):
                    return False
            return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
        def _inputs_have_equal_length(input : str ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 : Tuple ,input_2 : Optional[Any] ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 ,input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) ,np.asarray(input_slice_2 ) ,atol=1e-3 ):
                    return False
            return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
        SCREAMING_SNAKE_CASE =[len(x ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
        SCREAMING_SNAKE_CASE =[len(x ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
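# Editor's hedged mini-example of the pad() contract exercised above (the key
# name and padding value depend on the concrete feature extractor):
#   feat_extract.pad({"input_features": [[1, 2, 3], [4, 5]]}, padding="longest")
#   -> input_features [[1, 2, 3], [4, 5, <pad>]], attention_mask [[1, 1, 1], [1, 1, 0]]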
| 334 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance ( x , y , max_step ):
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb ( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb ( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image ( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    """simple docstring"""
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
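# Editor's hedged sanity checks for get_distance: the origin never escapes, so
# it runs all max_step iterations and normalizes to exactly 1, while a point
# far outside the set escapes on the first step and maps to 0.
#   assert get_distance(0, 0, 50) == 1.0
#   assert get_distance(2, 2, 50) == 0.0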
| 104 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate( item, main_target ):
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_1, parent_2 ):
    """simple docstring"""
    random_slice = random.randint(0, len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate( child, genes ):
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select( parent_1, population_score, genes, ):
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0, N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0], parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1, genes ) )
        pop.append(mutate(child_2, genes ) )
    return pop
def basic( target, genes, debug = True ):
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = F'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x : x[1], reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population:{total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )], population_score, genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation , population , target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
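    # Editor's hedged example of the fitness function above: evaluate() counts
    # positional matches, so evaluate("abcd", "abXd") == ("abcd", 3.0).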
| 334 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCamelCase :
@staticmethod
def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Dict:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a : Optional[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Tuple = pipeline(
"document-question-answering" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a : Optional[int] = INVOICE_URL
a : str = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
a : Union[str, Any] = "What is the placebo?"
a : Dict = [
{
"image": load_image(lowerCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Tuple = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def __a ( self ) -> List[Any]:
a : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
a : Dict = INVOICE_URL
a : List[str] = "How many cats are there?"
a : Tuple = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
a : Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
a : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
a : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
a : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Tuple = []
a : Optional[int] = []
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def __a ( self ) -> Tuple:
a : int = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
a : List[str] = INVOICE_URL
a : List[Any] = "What is the invoice number?"
a : int = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def __a ( self ) -> Optional[int]:
a : List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> str:
a : Optional[int] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : int = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , )
a : List[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
a : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> Tuple:
a : int = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : Tuple = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=50 , )
a : List[str] = INVOICE_URL
a : Union[str, Any] = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
a : List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
a : List[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def __a ( self ) -> int:
a : Tuple = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def __a ( self ) -> int:
pass
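# Standalone sketch of the rounding helper the assertions above rely on
# (simplified: the real `nested_simplify` in transformers.testing_utils also
# handles tensors and other container types):
def _nested_round(obj, decimals=4):
    if isinstance(obj, float):
        return round(obj, decimals)
    if isinstance(obj, dict):
        return {k: _nested_round(v, decimals) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_nested_round(x, decimals) for x in obj]
    return obj
assert _nested_round({"score": 0.42514999, "answer": "us-001"}) == {"score": 0.4251, "answer": "us-001"}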
| 105 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
        # A mock response for an HTTP head request, to emulate the server being down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
        # A mock response for an HTTP head request, to emulate the server being down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
        trie.data  # no-op access kept from the original test; the assertion below verifies the state
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
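# Minimal sketch of the nested-dict trie insertion the tests above exercise
# (independent reimplementation for illustration; the real `Trie` lives in
# transformers.tokenization_utils):
def _trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1  # empty-string key marks a complete word
_data: dict = {}
_trie_add(_data, "AB")
_trie_add(_data, "A")
assert _data == {"A": {"B": {"": 1}, "": 1}}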
| 334 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : List[str] = old_name
if "patch_embed" in old_name:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = old_name.split('''.''' )
if layer == "0":
lowerCAmelCase__ : str = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
lowerCAmelCase__ : List[Any] = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
lowerCAmelCase__ : Optional[Any] = old_name.replace('''3''' , '''convolution2''' )
else:
lowerCAmelCase__ : Optional[int] = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , A_ ):
lowerCAmelCase__ : int = r'''\b\d{2}\b'''
if bool(re.search(A_ , A_ ) ):
lowerCAmelCase__ : Union[str, Any] = re.search(r'''\d\.\d\d.''' , A_ ).group()
else:
lowerCAmelCase__ : Tuple = re.search(r'''\d\.\d.''' , A_ ).group()
if int(match[0] ) < 6:
lowerCAmelCase__ : Tuple = old_name.replace(A_ , '''''' )
lowerCAmelCase__ : int = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
lowerCAmelCase__ : str = '''intermediate_stages.''' + trimmed_name
else:
lowerCAmelCase__ : Dict = old_name.replace(A_ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
lowerCAmelCase__ : str = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
lowerCAmelCase__ : List[Any] = str(int(match[2] ) - num_meta4D_last_stage )
lowerCAmelCase__ : Tuple = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
lowerCAmelCase__ : Any = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
lowerCAmelCase__ : str = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
lowerCAmelCase__ : int = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
lowerCAmelCase__ : int = trimmed_name.replace('''fc2''' , '''linear_out''' )
lowerCAmelCase__ : Any = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , A_ ):
lowerCAmelCase__ : Tuple = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
lowerCAmelCase__ : Union[str, Any] = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowerCAmelCase__ : int = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowerCAmelCase__ : str = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
lowerCAmelCase__ : Tuple = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
lowerCAmelCase__ : Any = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
lowerCAmelCase__ : Tuple = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
lowerCAmelCase__ : Optional[int] = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowerCAmelCase__ : Optional[int] = new_name.replace('''norm''' , '''layernorm''' )
lowerCAmelCase__ : Tuple = '''efficientformer.''' + new_name
else:
lowerCAmelCase__ : Dict = '''efficientformer.encoder.''' + new_name
return new_name
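# Standalone illustration of the stage-index regexes used above (toy strings;
# the dumped function itself is obfuscated, so these checks are independent):
import re as _re
assert _re.search(r"\d\.\d", "network.3.2.mlp.fc1") is not None
assert bool(_re.search(r"\b\d{2}\b", "network.10.1"))  # two-digit block index
assert _re.search(r"\d\.\d\d.", "network.3.12.").group() == "3.12."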
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
for key in checkpoint.copy().keys():
lowerCAmelCase__ : List[Any] = checkpoint.pop(A_ )
lowerCAmelCase__ : List[str] = val
return checkpoint
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get(A_ , stream=A_ ).raw )
return image
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Tuple = torch.load(A_ , map_location='''cpu''' )['''model''']
lowerCAmelCase__ : int = EfficientFormerConfig.from_json_file(A_ )
lowerCAmelCase__ : Tuple = EfficientFormerForImageClassificationWithTeacher(A_ )
lowerCAmelCase__ : Dict = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
lowerCAmelCase__ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
lowerCAmelCase__ : Any = convert_torch_checkpoint(A_ , A_ )
model.load_state_dict(A_ )
model.eval()
lowerCAmelCase__ : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = 2_56
lowerCAmelCase__ : List[str] = 2_24
lowerCAmelCase__ : int = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
lowerCAmelCase__ : Optional[int] = processor(images=A_ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
lowerCAmelCase__ : Optional[int] = Compose(
[
Resize(A_ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(A_ ),
ToTensor(),
Normalize(A_ , A_ ),
] )
lowerCAmelCase__ : List[str] = image_transforms(A_ ).unsqueeze(0 )
assert torch.allclose(A_ , A_ )
lowerCAmelCase__ : Dict = model(A_ )
lowerCAmelCase__ : int = outputs.logits
lowerCAmelCase__ : Tuple = (1, 10_00)
if "l1" in model_name:
lowerCAmelCase__ : Optional[Any] = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowerCAmelCase__ : Optional[Any] = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowerCAmelCase__ : Dict = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7' )
# Save Checkpoints
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(A_ )
    print(f'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='''Add model''' , use_temp_dir=A_ , )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='''Add image processor''' , use_temp_dir=A_ , )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
__UpperCamelCase : str = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 106 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =[
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCamelCase =[
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' )
return sd
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =OrderedDict()
SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE =key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0], name_pair[1] )
SCREAMING_SNAKE_CASE =d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
SCREAMING_SNAKE_CASE =new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE ='pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
SCREAMING_SNAKE_CASE ='multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
SCREAMING_SNAKE_CASE ='vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048, 'num_labels': 3129}
SCREAMING_SNAKE_CASE ='vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE ='nlvr'
SCREAMING_SNAKE_CASE =VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE =load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =get_new_dict(lowerCAmelCase_, lowerCAmelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE =VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
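# Minimal standalone sketch of the prefix-rename step above (hypothetical keys;
# independent of the dumped helpers, whose names this dataset obfuscates):
from collections import OrderedDict as _OD
def _rename_keys(sd, pairs):
    out = _OD()
    for key, val in sd.items():
        new_key = key
        for old, new in pairs:
            new_key = new_key.replace(old, new)
        out[new_key] = val
    return out
_demo = _OD([("bert.bert.encoder.w", 1), ("bert.cls.bias", 2)])
assert list(_rename_keys(_demo, [("bert.bert", "visual_bert"), ("bert.cls", "cls")])) == [
    "visual_bert.encoder.w",
    "cls.bias",
]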
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowerCamelCase =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase : Optional[int] = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase : List[Any] = BASE_URL + '/user'
# https://github.com/settings/tokens
__lowerCAmelCase : Optional[Any] = os.environ.get('USER_TOKEN', '')
def __magic_name__ ( A : str ):
'''simple docstring'''
a = {
"Authorization": F"""token {auth_token}""",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(A, headers=A ).json()
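# Offline shape check of the auth header built above (placeholder token, never a
# real one; no request is made):
_fake_token = "ghp_placeholder"
_hdrs = {
    "Authorization": f"token {_fake_token}",
    "Accept": "application/vnd.github.v3+json",
}
assert _hdrs["Authorization"].startswith("token ")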
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'nllb-moe'
__UpperCAmelCase = ['past_key_values']
__UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =encoder_ffn_dim
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =encoder_attention_heads
SCREAMING_SNAKE_CASE =decoder_ffn_dim
SCREAMING_SNAKE_CASE =decoder_layers
SCREAMING_SNAKE_CASE =decoder_attention_heads
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =activation_dropout
SCREAMING_SNAKE_CASE =activation_function
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =encoder_layerdrop
SCREAMING_SNAKE_CASE =decoder_layerdrop
SCREAMING_SNAKE_CASE =use_cache
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE =router_z_loss_coef
SCREAMING_SNAKE_CASE =router_aux_loss_coef
SCREAMING_SNAKE_CASE =decoder_sparse_step
SCREAMING_SNAKE_CASE =encoder_sparse_step
SCREAMING_SNAKE_CASE =num_experts
SCREAMING_SNAKE_CASE =expert_capacity
SCREAMING_SNAKE_CASE =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
SCREAMING_SNAKE_CASE =router_dtype
SCREAMING_SNAKE_CASE =router_ignore_padding_tokens
SCREAMING_SNAKE_CASE =batch_prioritized_routing
SCREAMING_SNAKE_CASE =second_expert_policy
SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE =moe_token_dropout
SCREAMING_SNAKE_CASE =output_router_logits
super().__init__(
pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
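# Standalone sketch of the `router_dtype` guard in __init__ above (the dumped
# class signature is obfuscated beyond direct instantiation, so the check is
# reimplemented here for illustration):
def _check_router_dtype(router_dtype: str) -> str:
    if router_dtype not in ["float32", "float16", "bfloat16"]:
        raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
    return router_dtype
assert _check_router_dtype("bfloat16") == "bfloat16"
try:
    _check_router_dtype("int8")
except ValueError:
    pass
else:
    raise AssertionError("expected a ValueError for an unsupported dtype")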
| 334 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCAmelCase__ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowerCAmelCase__ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowerCAmelCase__ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCAmelCase : Optional[int] = np.array([re.sub(snake_case__ , "" , snake_case__ ) for x in predictions] )
lowerCAmelCase : Dict = np.array([re.sub(snake_case__ , "" , snake_case__ ) for x in references] )
else:
lowerCAmelCase : Optional[int] = np.asarray(snake_case__ )
lowerCAmelCase : List[Any] = np.asarray(snake_case__ )
if ignore_case:
lowerCAmelCase : List[Any] = np.char.lower(snake_case__ )
lowerCAmelCase : int = np.char.lower(snake_case__ )
if ignore_punctuation:
lowerCAmelCase : Any = string.punctuation.maketrans("" , "" , string.punctuation )
lowerCAmelCase : Any = np.char.translate(snake_case__ , table=snake_case__ )
lowerCAmelCase : List[str] = np.char.translate(snake_case__ , table=snake_case__ )
if ignore_numbers:
lowerCAmelCase : Optional[int] = string.digits.maketrans("" , "" , string.digits )
lowerCAmelCase : List[str] = np.char.translate(snake_case__ , table=snake_case__ )
lowerCAmelCase : List[Any] = np.char.translate(snake_case__ , table=snake_case__ )
lowerCAmelCase : str = predictions == references
return {"exact_match": np.mean(snake_case__ ) * 100}
| 108 |
from __future__ import annotations
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =divmod(len(lowerCAmelCase_ ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
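# Standalone sketch of the merge-then-index median above (the dumped signature is
# obfuscated, so this reimplements it; a partition-based approach could reach
# O(log(min(m, n))) instead of O((m + n) log(m + n))):
def _median_of_two(xs, ys):
    merged = sorted(xs + ys)
    half, odd = divmod(len(merged), 2)
    return merged[half] if odd else (merged[half] + merged[half - 1]) / 2
assert _median_of_two([1.0, 3.0], [2.0]) == 2.0
assert _median_of_two([1.0, 2.0], [3.0, 4.0]) == 2.5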
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase =[float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCamelCase =[float(x) for x in input("Enter the elements of second array: ").split()]
print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 334 | 0 |
"""simple docstring"""
A: Union[str, Any] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def _snake_case ( UpperCamelCase : float ):
assert type(UpperCamelCase ) in (int, float) and decimal == int(UpperCamelCase )
UpperCAmelCase : str = int(UpperCamelCase )
UpperCAmelCase : Optional[int] = """"""
UpperCAmelCase : List[str] = False
if decimal < 0:
UpperCAmelCase : Any = True
decimal *= -1
while decimal > 0:
UpperCAmelCase , UpperCAmelCase : Dict = divmod(UpperCamelCase , 16 )
UpperCAmelCase : Union[str, Any] = values[remainder] + hexadecimal
    # an input of 0 skips the loop above entirely, so fall back to "0" explicitly
    if hexadecimal == "":
        UpperCAmelCase : str = """0"""
    UpperCAmelCase : int = """0x""" + hexadecimal
if negative:
UpperCAmelCase : Optional[int] = """-""" + hexadecimal
return hexadecimal
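# Standalone sketch of the divmod digit loop above (the dumped body mixes
# obfuscated and original names, so this reimplements it for a quick check):
def _to_hex(n: int) -> str:
    digits, sign = "", "-" if n < 0 else ""
    n = abs(n)
    while n > 0:
        n, rem = divmod(n, 16)
        digits = "0123456789abcdef"[rem] + digits
    return sign + "0x" + (digits or "0")
assert _to_hex(26) == "0x1a"
assert _to_hex(-26) == "-0x1a"
assert _to_hex(0) == "0x0"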
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'transfo-xl'
__UpperCAmelCase = ['mems']
__UpperCAmelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] ,snake_case : List[Any]=267735 ,snake_case : Optional[int]=[20000, 40000, 200000] ,snake_case : int=1024 ,snake_case : Optional[Any]=1024 ,snake_case : Tuple=16 ,snake_case : int=64 ,snake_case : Union[str, Any]=4096 ,snake_case : List[str]=4 ,snake_case : int=False ,snake_case : int=18 ,snake_case : Tuple=1600 ,snake_case : List[str]=1000 ,snake_case : Optional[Any]=True ,snake_case : List[str]=True ,snake_case : Optional[Any]=0 ,snake_case : Optional[Any]=-1 ,snake_case : List[Any]=True ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.0 ,snake_case : int=True ,snake_case : Any="normal" ,snake_case : int=0.01 ,snake_case : int=0.01 ,snake_case : str=0.02 ,snake_case : Any=1e-5 ,snake_case : Optional[int]=0 ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =[]
self.cutoffs.extend(snake_case )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE =[False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE =[False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =d_embed
SCREAMING_SNAKE_CASE =d_head
SCREAMING_SNAKE_CASE =d_inner
SCREAMING_SNAKE_CASE =div_val
SCREAMING_SNAKE_CASE =pre_lnorm
SCREAMING_SNAKE_CASE =n_layer
SCREAMING_SNAKE_CASE =n_head
SCREAMING_SNAKE_CASE =mem_len
SCREAMING_SNAKE_CASE =same_length
SCREAMING_SNAKE_CASE =attn_type
SCREAMING_SNAKE_CASE =clamp_len
SCREAMING_SNAKE_CASE =sample_softmax
SCREAMING_SNAKE_CASE =adaptive
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =dropatt
SCREAMING_SNAKE_CASE =untie_r
SCREAMING_SNAKE_CASE =init
SCREAMING_SNAKE_CASE =init_range
SCREAMING_SNAKE_CASE =proj_init_std
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =layer_norm_epsilon
super().__init__(eos_token_id=snake_case ,**snake_case )
@property
def _lowerCAmelCase ( self : str ):
# Message copied from Transformer-XL documentation
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Dict ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
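# Worked example of the `tie_projs` construction in __init__ above (cutoffs are
# the defaults from this file's signature):
_cutoffs = [20000, 40000, 200000]
assert [False] + [True] * len(_cutoffs) == [False, True, True, True]  # proj_share_all_but_first=True
assert [False] + [False] * len(_cutoffs) == [False, False, False, False]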
| 334 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
# pass variant but use the non-variant filenames
A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertFalse(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
# pass variant but use the non-variant filenames
A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
        self.assertFalse(is_safetensors_compatible(A_ ,variant=A_ ) )
 | 74 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a_ :
"""simple docstring"""
def __init__( self : Optional[int] ,snake_case : Any ,snake_case : Dict=100 ,snake_case : List[Any]=13 ,snake_case : str=30 ,snake_case : List[str]=2 ,snake_case : List[Any]=3 ,snake_case : Tuple=True ,snake_case : Optional[Any]=True ,snake_case : int=32 ,snake_case : Tuple=4 ,snake_case : List[Any]=4 ,snake_case : Optional[Any]=37 ,snake_case : Optional[Any]="gelu" ,snake_case : Tuple=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : List[Any]=10 ,snake_case : Tuple=0.02 ,snake_case : List[str]=3 ,snake_case : Any=None ,snake_case : int=[0, 1, 2, 3] ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =100
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =out_indices
SCREAMING_SNAKE_CASE =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE =num_patches + 1
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self : Dict ):
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =BeitModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =BeitModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
| 334 | 0 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'{price_plus_tax(100, 0.25) = }')
print(F'{price_plus_tax(125.50, 0.05) = }')
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m: nn.Module, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Optional[Any] ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
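# Illustrative sketch (not part of the original script): Tracker records every
# parametrized leaf module touched during a forward pass, e.g.
#
#   tracked = Tracker(timm.create_model("regnety_002", pretrained=False))(
#       torch.randn(1, 3, 224, 224)
#   )
#   leaves = tracked.parametrized  # leaf modules with learnable parameters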
@dataclass
class ModuleTransfer:
    """simple docstring"""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True
def __call__( self : str ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) )
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) )
if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case )} operations while'
f' destination module has {len(snake_case )}.' )
for dest_m, src_m in zip(snake_case ,snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
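# Sketch of how ModuleTransfer is used further below: both models are traced on
# the same dummy input and weights are copied positionally between the matching
# parametrized leaf modules, e.g.
#
#   ModuleTransfer(src=from_model, dest=our_model)(torch.randn(1, 3, 224, 224))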
class FakeRegNetVisslWrapper(nn.Module):
"""simple docstring"""
def __init__( self : Any ,snake_case : nn.Module ):
super().__init__()
SCREAMING_SNAKE_CASE =[]
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f'Unexpected layer name {k}'
SCREAMING_SNAKE_CASE =len(snake_case ) + 1
feature_blocks.append((f'res{block_index}', v) )
SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case )
def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ):
return get_trunk_forward_outputs(
snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,)
class NameToFromModelFuncMap(dict):
"""simple docstring"""
    def convert_name_to_timm(self, x: str) -> str:
SCREAMING_SNAKE_CASE =x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[Any] ,snake_case : str ):
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case )
SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) )
else:
SCREAMING_SNAKE_CASE =super().__getitem__(snake_case )
return val
class NameToOurModelFuncMap(dict):
"""simple docstring"""
def __getitem__( self : int ,snake_case : str ):
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE =RegNetModel
else:
SCREAMING_SNAKE_CASE =RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
"""simple docstring"""
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE =from_state_dict[from_key].clone()
print(F'Copied key={from_key} to={to_key}' )
return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func()
SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase_ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ )
our_model.load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =(
our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1]
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, )
print(F'Pushed {name}' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE =1000
SCREAMING_SNAKE_CASE =(1, num_labels)
SCREAMING_SNAKE_CASE ='huggingface/label-files'
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =json.load(open(cached_download(hf_hub_url(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ) ), 'r' ) )
SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =idalabel
SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE ={
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
SCREAMING_SNAKE_CASE =NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE =NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCAmelCase_, lowerCAmelCase_ ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' )
SCREAMING_SNAKE_CASE =model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE =model_state_dict['trunk']
model.load_state_dict(lowerCAmelCase_ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
# IN1K finetuned
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
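# Example invocation (the script file name is hypothetical; the flags and the
# model name are the ones defined above):
#
#   python convert_regnet_seer_to_pytorch.py \
#       --model_name regnet-y-320-seer \
#       --pytorch_dump_folder_path ./regnet_dump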
| 334 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in `qs` match a window of the key tuple `ks`."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    """Return a function mapping (key, value) to the first matching rule's replacement."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Map parameter-name patterns to PartitionSpecs for model-parallel sharding."""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , lowerCAmelCase_ )),
(("transformer", "wte", "embedding"), P("mp" , lowerCAmelCase_ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase_ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , lowerCAmelCase_ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase_ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , lowerCAmelCase_ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of the params pytree."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
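# Minimal usage sketch (assumes a flax params pytree from a GPT-style model;
# `model.params` is a placeholder name):
#
#   spec = set_partitions(model.params)
#   # `spec` mirrors the params tree, holding a PartitionSpec (or None) per leaf.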
| 184 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase =16
_lowerCamelCase =32
def get_dataloaders(accelerator, batch_size=16):
"""simple docstring"""
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE =load_dataset('glue', 'mrpc' )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE =datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE =8
else:
SCREAMING_SNAKE_CASE =None
return tokenizer.pad(
lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase =mocked_dataloaders # noqa: F811
def training_function(config, args):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1":
SCREAMING_SNAKE_CASE =2
# Initialize accelerator
SCREAMING_SNAKE_CASE =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE =config['lr']
SCREAMING_SNAKE_CASE =int(config['num_epochs'] )
SCREAMING_SNAKE_CASE =int(config['seed'] )
SCREAMING_SNAKE_CASE =int(config['batch_size'] )
SCREAMING_SNAKE_CASE =evaluate.load('glue', 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase_ )
def inner_training_loop(lowerCAmelCase_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE =AdamW(params=model.parameters(), lr=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.prepare(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =outputs.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase_, references=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:', lowerCAmelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
main()
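# How the decorator used above behaves (a sketch, not part of this script):
# whenever the wrapped function raises an out-of-memory error,
# `find_executable_batch_size` frees memory, halves the batch size and calls
# the function again, so with starting_batch_size=128 it tries 128, 64, 32, ...
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def probe(batch_size):
#       print(f"trying batch_size={batch_size}")
#
#   probe()  # the batch_size argument is injected by the decorator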
| 334 | 0 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp hashing."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
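# Worked example of the rolling hash above (real constants: alphabet_size=256,
# modulus=1_000_003): for a pattern of length 2, modulus_power == alphabet_size,
# so sliding the window from "ab" to "bc" computes
#   (hash("ab") - ord("a") * alphabet_size) * alphabet_size + ord("c")
# which equals ord("b") * alphabet_size + ord("c") == hash("bc").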
def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
if __name__ == "__main__":
test_rabin_karp()
| 53 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse every word in the sentence that is longer than 4 characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
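# Example invocation (the script file name and output path are assumed; the
# repo id is the one given in the help text above):
#
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_dump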
| 52 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,)
SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: Optional[Any] = logging.get_logger(__name__)
__a: Optional[Any] = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __lowerCAmelCase=50265 , __lowerCAmelCase=1024 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="gelu" , __lowerCAmelCase=1024 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=0 , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , **__lowerCAmelCase , ) -> List[str]:
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = max_position_embeddings
lowercase__ : str = d_model
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : str = encoder_layers
lowercase__ : int = encoder_attention_heads
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : Optional[int] = decoder_layers
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : List[str] = attention_dropout
lowercase__ : Tuple = activation_dropout
lowercase__ : Optional[int] = activation_function
lowercase__ : List[str] = init_std
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : int = use_cache
lowercase__ : str = encoder_layers
lowercase__ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
@property
    def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
return self.d_model
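# Sketch (assuming, as in the upstream PegasusConfig, that the assignments in
# __init__ above bind to `self`):
#
#   config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#   print(config.d_model)      # 512
#   print(config.hidden_size)  # 512 as well, via attribute_map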
| 198 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip_2_vision_model'
def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class BlipaQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip_2_qformer'
def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,):
super().__init__(pad_token_id=snake_case ,**snake_case )
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =position_embedding_type
SCREAMING_SNAKE_CASE =cross_attention_frequency
SCREAMING_SNAKE_CASE =encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class BlipaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blip-2'
    is_composition = True
def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ):
super().__init__(**snake_case )
if vision_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case )
SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case )
SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt'
SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case )
SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE =num_query_tokens
SCREAMING_SNAKE_CASE =self.vision_config.hidden_size
SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE =1.0
SCREAMING_SNAKE_CASE =0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config: BlipaVisionConfig, qformer_config: BlipaQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
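# Sketch of composing the three sub-configs via the classmethod above
# (OPTConfig is not imported in this file and would need to be; 'opt' is the
# default text model type per __init__ above):
#
#   config = BlipaConfig.from_vision_qformer_text_configs(
#       vision_config=BlipaVisionConfig(),
#       qformer_config=BlipaQFormerConfig(),
#       text_config=OPTConfig(),
#   )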
| 334 | 0 |
def reverse_long_words(sentence: str) -> str:
    '''
    Reverse every word in the sentence that is longer than 4 characters.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    '''
    return ' '.join(
        ''.join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 29 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : str ,snake_case : List[str]=None ,snake_case : str=None ,snake_case : int=None ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]="auto" ,snake_case : List[str]=-1 ,snake_case : Union[str, Any]=0.9 ,snake_case : Tuple=5 ,snake_case : Union[str, Any]=500 ,snake_case : Union[str, Any]="gpt2-large" ,snake_case : Union[str, Any]=-1 ,snake_case : Optional[Any]=1024 ,snake_case : Optional[Any]=25 ,snake_case : List[str]=5 ,snake_case : List[str]=True ,snake_case : Optional[Any]=25 ,):
SCREAMING_SNAKE_CASE =compute_mauve(
p_text=snake_case ,q_text=snake_case ,p_features=snake_case ,q_features=snake_case ,p_tokens=snake_case ,q_tokens=snake_case ,num_buckets=snake_case ,pca_max_data=snake_case ,kmeans_explained_var=snake_case ,kmeans_num_redo=snake_case ,kmeans_max_iter=snake_case ,featurize_model_name=snake_case ,device_id=snake_case ,max_text_length=snake_case ,divergence_curve_discretization_size=snake_case ,mauve_scaling_factor=snake_case ,verbose=snake_case ,seed=snake_case ,)
return out
| 334 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    '''simple docstring'''

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs
def __init__( self : Tuple , A : Optional[DatasetInfo] = None , A : Optional[str] = None , **A : Tuple , ):
super().__init__(self , **A )
__snake_case: int = repo_info
__snake_case: Optional[Any] = token
__snake_case: str = None
    def _get_dirs(self):
if self.dir_cache is None:
__snake_case: Optional[int] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__snake_case: List[Any] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A ): {"""name""": str(A ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCAmelCase__ ( self : Optional[Any] , A : str , A : str = "rb" , **A : Dict , ):
if not isinstance(self.repo_info , A ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__snake_case: str = hf_hub_url(self.repo_info.id , A , revision=self.repo_info.sha )
return fsspec.open(
A , mode=A , headers=get_authentication_headers_for_url(A , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCAmelCase__ ( self : Dict , A : Optional[int] , **A : Optional[Any] ):
self._get_dirs()
__snake_case: str = self._strip_protocol(A )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A )
def UpperCAmelCase__ ( self : str , A : Any , A : Union[str, Any]=False , **A : str ):
self._get_dirs()
__snake_case: Union[str, Any] = PurePosixPath(path.strip("""/""" ) )
__snake_case: int = {}
for p, f in self.dir_cache.items():
__snake_case: int = PurePosixPath(p.strip("""/""" ) )
__snake_case: Optional[int] = p.parent
if root == path:
__snake_case: Tuple = f
__snake_case: Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'vit_mae'
def __init__( self : Union[str, Any] ,snake_case : Any=768 ,snake_case : List[str]=12 ,snake_case : Optional[int]=12 ,snake_case : int=3072 ,snake_case : List[Any]="gelu" ,snake_case : str=0.0 ,snake_case : str=0.0 ,snake_case : Optional[Any]=0.02 ,snake_case : Dict=1e-12 ,snake_case : List[str]=224 ,snake_case : Any=16 ,snake_case : Any=3 ,snake_case : Tuple=True ,snake_case : List[Any]=16 ,snake_case : List[str]=512 ,snake_case : List[Any]=8 ,snake_case : Dict=2048 ,snake_case : Union[str, Any]=0.75 ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =qkv_bias
SCREAMING_SNAKE_CASE =decoder_num_attention_heads
SCREAMING_SNAKE_CASE =decoder_hidden_size
SCREAMING_SNAKE_CASE =decoder_num_hidden_layers
SCREAMING_SNAKE_CASE =decoder_intermediate_size
SCREAMING_SNAKE_CASE =mask_ratio
SCREAMING_SNAKE_CASE =norm_pix_loss
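# Worked example of the defaults above: with image_size=224 and patch_size=16
# the encoder sees (224 // 16) ** 2 == 196 patches, and mask_ratio=0.75 means
# roughly 147 of them are masked during MAE pre-training.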
| 334 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_lowerCAmelCase = '''\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'''
_lowerCAmelCase = '''\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It\'s like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It\'s like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n'''
_lowerCAmelCase = '''\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,reference_urls=[] ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,) -> Optional[int]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCAmelCase__ : List[Any] = np.array([re.sub(__UpperCAmelCase ,"""""" ,__UpperCAmelCase ) for x in predictions] )
lowerCAmelCase__ : str = np.array([re.sub(__UpperCAmelCase ,"""""" ,__UpperCAmelCase ) for x in references] )
else:
lowerCAmelCase__ : Union[str, Any] = np.asarray(__UpperCAmelCase )
lowerCAmelCase__ : Any = np.asarray(__UpperCAmelCase )
if ignore_case:
lowerCAmelCase__ : Optional[Any] = np.char.lower(__UpperCAmelCase )
lowerCAmelCase__ : Any = np.char.lower(__UpperCAmelCase )
if ignore_punctuation:
lowerCAmelCase__ : Optional[int] = string.punctuation.maketrans("""""" ,"""""" ,string.punctuation )
lowerCAmelCase__ : List[Any] = np.char.translate(__UpperCAmelCase ,table=__UpperCAmelCase )
lowerCAmelCase__ : Any = np.char.translate(__UpperCAmelCase ,table=__UpperCAmelCase )
if ignore_numbers:
lowerCAmelCase__ : Any = string.digits.maketrans("""""" ,"""""" ,string.digits )
lowerCAmelCase__ : str = np.char.translate(__UpperCAmelCase ,table=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = np.char.translate(__UpperCAmelCase ,table=__UpperCAmelCase )
lowerCAmelCase__ : str = predictions == references
return {"exact_match": np.mean(__UpperCAmelCase ) * 100}
| 37 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_snake_case : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_snake_case : Optional[int] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n'
_snake_case : str = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_snake_case : Optional[int] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n'
_snake_case : Optional[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : Tuple ) -> Optional[int]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=[1, 1_0, 1_0_0] , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Tuple=3.0 ) -> int:
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowerCAmelCase_ ) as executor:
__lowerCAmelCase = []
__lowerCAmelCase = Counter()
__lowerCAmelCase = 0
__lowerCAmelCase = defaultdict(lowerCAmelCase_ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ):
for candidate in candidates:
__lowerCAmelCase = candidate + '\n' + test_case
__lowerCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
__lowerCAmelCase = executor.submit(lowerCAmelCase_ , *lowerCAmelCase_ )
futures.append(lowerCAmelCase_ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCAmelCase_ ):
__lowerCAmelCase = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
__lowerCAmelCase , __lowerCAmelCase = [], []
for result in results.values():
result.sort()
__lowerCAmelCase = [r[1]['passed'] for r in result]
total.append(len(lowerCAmelCase_ ) )
correct.append(sum(lowerCAmelCase_ ) )
__lowerCAmelCase = np.array(lowerCAmelCase_ )
__lowerCAmelCase = np.array(lowerCAmelCase_ )
__lowerCAmelCase = k
__lowerCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : str, lowerCAmelCase_ : Tuple ):
def estimator(lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Any ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) )
if isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
__lowerCAmelCase = itertools.repeat(lowerCAmelCase_, len(lowerCAmelCase_ ) )
else:
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
__lowerCAmelCase = iter(lowerCAmelCase_ )
return np.array([estimator(int(lowerCAmelCase_ ), int(lowerCAmelCase_ ), lowerCAmelCase_ ) for n, c in zip(lowerCAmelCase_, lowerCAmelCase_ )] )
| 284 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_, pa.Buffer ) else pa.memory_map(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lst[0], lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
A__ = 8.314462 # Unit - J mol-1 K-1
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 230 |
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def snake_case__ ( ):
"""simple docstring"""
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.