"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
a =x
a =y
for step in range(lowercase ): # noqa: B007
a =a * a - b * b + x
a =2 * a * b + y
a =a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _A ( lowercase ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _A ( lowercase ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(lowercase , 1 , 1 ) )
def _A ( lowercase = 8_00 , lowercase = 6_00 , lowercase = -0.6 , lowercase = 0 , lowercase = 3.2 , lowercase = 50 , lowercase = True , ):
"""simple docstring"""
a =Image.new('''RGB''' , (image_width, image_height) )
a =img.load()
# loop through the image-coordinates
for image_x in range(lowercase ):
for image_y in range(lowercase ):
# determine the figure-coordinates based on the image-coordinates
a =figure_width / image_width * image_height
a =figure_center_x + (image_x / image_width - 0.5) * figure_width
a =figure_center_y + (image_y / image_height - 0.5) * figure_height
a =get_distance(lowercase , lowercase , lowercase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
a =get_color_coded_rgb(lowercase )
else:
a =get_black_and_white_rgb(lowercase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase_ : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 81 |
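# Quick sanity check of get_distance (an illustrative sketch, not part of the
# original module): the origin never diverges, so its relative distance is 1.0,
# while a point far outside the set escapes on the very first step.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0  # inside the set -> rendered black
    assert get_distance(2, 0, 50) == 0.0  # diverges immediately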
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ : List[Any] = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ : Optional[Any] = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
class _UpperCAmelCase ( __a):
__a : Optional[int] = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["""input_ids""", """attention_mask"""]
__a : Dict = TaTokenizer
__a : List[int] = []
def __init__( self , _A=None , _A=None , _A="</s>" , _A="<unk>" , _A="<pad>" , _A=1_00 , _A=None , **_A , ) -> Union[str, Any]:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase : Any = [f'''<extra_id_{i}>''' for i in range(_A )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_UpperCAmelCase : List[str] = len(set(filter(lambda _A : bool("""extra_id_""" in str(_A ) ) , _A ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
_A , tokenizer_file=_A , eos_token=_A , unk_token=_A , pad_token=_A , extra_ids=_A , additional_special_tokens=_A , **_A , )
_UpperCAmelCase : int = vocab_file
_UpperCAmelCase : Any = False if not self.vocab_file else True
_UpperCAmelCase : Optional[Any] = extra_ids
@staticmethod
def __snake_case ( _A , _A , _A ) -> Optional[int]:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_UpperCAmelCase : Union[str, Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , _A , )
return max_model_length
def __snake_case ( self , _A , _A = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : List[Any] = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __snake_case ( self , _A , _A = None ) -> List[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_UpperCAmelCase : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __snake_case ( self , _A , _A = None ) -> List[int]:
'''simple docstring'''
_UpperCAmelCase : str = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda _A : bool(re.search(r"""<extra_id_\d+>""" , _A ) ) is not None , self.additional_special_tokens ) ) )
def __snake_case ( self ) -> int:
'''simple docstring'''
return [self.convert_tokens_to_ids(_A ) for token in self.get_sentinel_tokens()]
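# Hedged usage sketch (not part of the original module): this class ships as
# transformers.T5TokenizerFast, and with a stock checkpoint the 100 extra_ids
# become "<extra_id_0>" ... "<extra_id_99>" sentinel tokens, retrievable via
# the helpers above. Requires network access to fetch the tokenizer files.
if __name__ == "__main__":
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    sentinels = tok.get_sentinel_tokens()
    print(len(sentinels))        # 100
    print(sorted(sentinels)[0])  # <extra_id_0>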
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
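    # Worked example of the formula above (illustrative note): with the
    # defaults image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225
    # patches, so with mask_ratio=0.6 the encoder sees
    # seq_length = ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 tokens.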
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
"""Feature extractor class for CLAP."""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a CLAP feature extractor, which turns raw waveforms into
    (possibly fused) log-mel spectrogram features.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary, dropping the large
        mel filter banks.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """
        Truncate or pad the waveform, then extract its (possibly fused) mel features.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
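# Hedged usage sketch (not part of the original module): this class is exposed
# as transformers.ClapFeatureExtractor. A 48 kHz mono clip shorter than 10 s is
# repeat-padded and, on the default "fusion" path, returned as a 4-channel mel
# stack; the printed shape is an expectation, not a guarantee.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapFeatureExtractor

    extractor = ClapFeatureExtractor()
    waveform = np.random.default_rng(0).standard_normal(48_000)  # 1 second of noise
    features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape)  # expected roughly (1, 4, 1001, 64)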
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """
    MLFQ (multi level feedback queue)
    https://en.wikipedia.org/wiki/Multilevel_feedback_queue
    MLFQ has several queues with different priorities. Here, the first
    queue (0) up to the second-to-last queue (N-2) use the round robin
    algorithm, and the last queue (N-1) uses first come, first served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """
        This method returns the sequence of finished processes.
        """
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """
        This method calculates the waiting time of the processes.
        """
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """
        This method calculates the turnaround time of the processes.
        """
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """
        This method calculates the completion time of the processes.
        """
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """
        This method returns the remaining burst times of the processes.
        """
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """
        This method updates the waiting time of the given (unfinished) process.
        """
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """
        FCFS (first come, first served): used for the last queue of the MLFQ.
        """
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """
        RR (round robin): used for all queues except the last one.
        """
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        """
        Run all queues: round robin for every queue except the last, FCFS for the last.
        """
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of a body: KE = 0.5 * mass * velocity**2.

    The mass must be non-negative; only the magnitude of the velocity matters.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
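# Quick worked check (an illustrative sketch, not part of the original module):
# KE = 0.5 * 10 * 10**2 = 500.0 joules, and the sign of the velocity does not
# change the result because only its absolute value enters the formula.
if __name__ == "__main__":
    assert kinetic_energy(10, 10) == 500.0
    assert kinetic_energy(10, -10) == 500.0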
import colorsys
from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
"""
Ternary search is a divide and conquer algorithm that splits the search space
into three parts and locates the target in a sorted list.

Time Complexity  : O(log3 N)
Space Complexity : O(1)
"""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : str = input('Enter numbers separated by comma:\n').strip()
lowerCamelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
lowerCamelCase : int = int(input('Enter the number to be found in the list:\n').strip())
lowerCamelCase : Tuple = ite_ternary_search(collection, target)
lowerCamelCase : Any = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
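# Non-interactive usage sketch (not part of the original module): both entry
# points return the index of the target in a sorted list, or -1 when absent.
if __name__ == "__main__":
    data = list(range(10))  # sorted input is required
    assert ite_ternary_search(data, 5) == 5
    assert rec_ternary_search(0, len(data) - 1, data, 5) == 5
    assert ite_ternary_search(data, 42) == -1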
def mean_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of a list of numbers, i.e. the average
    absolute distance of each value from the arithmetic mean.
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
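# Worked example (an illustrative sketch, not part of the original module):
# for [1, 2, 3, 4] the mean is 2.5 and the absolute deviations are
# 1.5, 0.5, 0.5, 1.5, so the result is 4.0 / 4 = 1.0.
if __name__ == "__main__":
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0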
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n] from the incoming sample x[n].
        """
        return 0.0
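# A minimal concrete filter satisfying the FilterType protocol above (an
# illustrative sketch, not part of the original module): a one-sample delay,
# y[n] = x[n - 1], which has unit gain and a linear phase response. Try e.g.
# show_frequency_response(DelayFilter(), 48000).
class DelayFilter:
    def __init__(self) -> None:
        self.previous_sample = 0.0

    def process(self, sample: float) -> float:
        delayed = self.previous_sample
        self.previous_sample = sample
        return delayed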
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """
    Get bounds for printing fft results.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Show the frequency response of a filter.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Show the phase response of a filter.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering` head.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports {"image": ..., "question": ...} dicts, lists of such
            # dicts, generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=64 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=1 , ) -> Tuple:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = q_groups
__lowerCamelCase = k_groups
__lowerCamelCase = v_groups
__lowerCamelCase = post_attention_groups
__lowerCamelCase = intermediate_groups
__lowerCamelCase = output_groups
def __A ( self : str ) -> Tuple:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Dict ) -> Optional[Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCamelCase = SqueezeBertModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
__lowerCamelCase = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
__lowerCamelCase = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
__lowerCamelCase = self.num_choices
__lowerCamelCase = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : List[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
        else ()
)
a__ : Any = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : int = False
a__ : Dict = True
a__ : str = False
def __A ( self : Any ) -> Tuple:
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
def __A ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def __A ( self : Optional[int] ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> Tuple:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Any:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Dict ) -> Union[str, Any]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : str ) -> Optional[Any]:
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
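# Standalone rendition of the integration check above, using the same public
# checkpoint and input ids; a manual-verification sketch rather than part of
# the test suite (needs network access to download the weights).
if __name__ == "__main__":
    import torch
    from transformers import SqueezeBertForSequenceClassification

    model = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
    input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
    logits = model(input_ids )[0]
    print(logits.shape )  # expected: torch.Size([1, 3]), one logit per MNLI label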
| 270 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
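# Note: with the guard above, importing this module never fails outright; when
# torch or a new enough transformers (>= 4.25.0) is missing, the dummy objects
# are bound instead and raise a descriptive error only when first used.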
| 270 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( bin_string ) -> str:
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    # each 3-bit group maps to exactly one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
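# Sanity checks for the converter above (the obfuscated function name is the
# snippet's own; int(x, 2) and oct() provide the reference answers):
assert _lowerCAmelCase("""101""" ) == oct(int("""101""", 2 ) )[2:]  # "5"
assert _lowerCAmelCase("""1111""" ) == oct(int("""1111""", 2 ) )[2:]  # "17"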
| 353 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[Any] =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] =["""input_ids""", """attention_mask"""]
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="<mask>",__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = AddedToken(__SCREAMING_SNAKE_CASE,lstrip=__SCREAMING_SNAKE_CASE,rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else mask_token
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,sep_token=__SCREAMING_SNAKE_CASE,cls_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,mask_token=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCAmelCase = 1
__lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset
__lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self,"""sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCamelCase__ ( self ):
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE,""" """ ).strip()
return out_string
    def lowerCamelCase__ ( self,save_directory,filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file,"""wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
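# A minimal sketch of the fairseq/spm id alignment implemented above, with the
# offset and ids written out as plain ints (values assumed for illustration):
def _align_spm_id(spm_id, fairseq_offset=1, unk_token_id=3):
    # spm id 0 is <unk>; every other piece is shifted by the fairseq offset
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert _align_spm_id(0) == 3  # unknown piece falls back to the fairseq <unk> id
assert _align_spm_id(3) == 4  # the first "real" spm token "," lands at id 4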
| 46 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
_lowerCamelCase =["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = ['input_ids', 'attention_mask']
__UpperCAmelCase = []
__UpperCAmelCase = []
def __init__( self : List[str] ,snake_case : List[str] ,snake_case : Any="<s>" ,snake_case : str="</s>" ,snake_case : List[str]="</s>" ,snake_case : Dict="<s>" ,snake_case : Dict="<unk>" ,snake_case : List[str]="<pad>" ,snake_case : List[str]="<mask>" ,snake_case : Optional[int]=None ,snake_case : str=None ,snake_case : Dict=None ,snake_case : Optional[Dict[str, Any]] = None ,snake_case : Optional[int]=None ,snake_case : Tuple=False ,**snake_case : List[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE =AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else mask_token
SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE =legacy_behaviour
super().__init__(
bos_token=snake_case ,eos_token=snake_case ,unk_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,tokenizer_file=snake_case ,src_lang=snake_case ,tgt_lang=snake_case ,additional_special_tokens=snake_case ,sp_model_kwargs=self.sp_model_kwargs ,legacy_behaviour=snake_case ,**snake_case ,)
SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
SCREAMING_SNAKE_CASE =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =len(self.sp_model )
SCREAMING_SNAKE_CASE ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case )
}
SCREAMING_SNAKE_CASE ={v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE =src_lang if src_lang is not None else 'eng_Latn'
SCREAMING_SNAKE_CASE =self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self : Any ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Dict ,d : Optional[int] ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowerCAmelCase ( self : Dict ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCAmelCase ( self : Tuple ):
return self._src_lang
@src_lang.setter
    def _lowerCAmelCase ( self : Tuple ,new_src_lang : str ):
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCAmelCase ( self : str ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ,snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case ,token_ids_a=snake_case ,already_has_special_tokens=snake_case )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case )) + suffix_ones
return prefix_ones + ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones
def _lowerCAmelCase ( self : str ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCAmelCase ( self : Any ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _lowerCAmelCase ( self : int ,raw_inputs : str ,return_tensors : str ,src_lang : Optional[str] ,tgt_lang : Optional[str] ,**extra_kwargs : Optional[Any] ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
def _lowerCAmelCase ( self : int ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : str ):
return self.sp_model.encode(snake_case ,out_type=snake_case )
def _lowerCAmelCase ( self : Dict ,snake_case : Any ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Any ):
        out_string = ''.join(snake_case ).replace(SPIECE_UNDERLINE ,' ' ).strip()
return out_string
    def _lowerCAmelCase ( self : Any ,save_directory : str ,filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def _lowerCAmelCase ( self : Optional[int] ,snake_case : List[str] ,snake_case : str = "eng_Latn" ,snake_case : Optional[List[str]] = None ,snake_case : str = "fra_Latn" ,**snake_case : Dict ,):
SCREAMING_SNAKE_CASE =src_lang
SCREAMING_SNAKE_CASE =tgt_lang
        return super().prepare_seq2seq_batch(snake_case ,snake_case ,**snake_case )
def _lowerCAmelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCAmelCase ( self : str ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def _lowerCAmelCase ( self : List[Any] ,src_lang : int ):
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def _lowerCAmelCase ( self : str ,lang : str ):
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
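# A toy rendering of the special-token placement implemented above (the ids
# below are assumed for illustration; the real language-code ids come from
# the checkpoint's vocabulary):
def _render(tokens, lang_code_id=256047, eos_id=2, legacy=False):
    prefix, suffix = ([], [eos_id, lang_code_id]) if legacy else ([lang_code_id], [eos_id])
    return prefix + tokens + suffix

assert _render([10, 11] ) == [256047, 10, 11, 2]  # default NLLB layout
assert _render([10, 11], legacy=True ) == [10, 11, 2, 256047]  # legacy layout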
| 334 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__snake_case = open # noqa: we just need to have a builtin inside this module to test it properly
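# The aliases above intentionally cover every import style (plain import,
# renamed import, from-import, renamed from-import, and a builtin stored in a
# module attribute) so patch_submodule() can be exercised against each one.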
| 169 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
    training_set = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )
    test_set = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )
    # `fit` accepts generators directly in TF 2.x; `fit_generator` is deprecated
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # the sigmoid head yields a probability in [0, 1], so threshold at 0.5
    # rather than comparing against exact 0 or 1
    if result[0][0] >= 0.5:
        prediction = """Abnormality detected"""
    else:
        prediction = """Normal"""
| 169 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> str:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def _lowerCAmelCase( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def _lowerCAmelCase( self , value ) -> str:
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def _lowerCAmelCase( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
        return tuple(files )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None ) -> Union[str, Any]:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
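# Illustrative use of the flag handled in __init__ above (checkpoint name is
# assumed; byte-level BPE distinguishes "Hello" from " Hello", so pretokenized
# input requires add_prefix_space=True):
#   tok = UpperCAmelCase.from_pretrained("roberta-base", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)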
| 198 | '''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
UpperCAmelCase , padding='''longest''' , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCAmelCase ) == "1":
lowercase__ : Optional[int] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
lowercase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['''lr''']
lowercase__ : Optional[int] = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : int = int(config['''batch_size'''] )
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : str = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Any = MAX_GPU_BATCH_SIZE
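    # worked example of the split above: batch_size 64 with MAX_GPU_BATCH_SIZE 16
    # gives gradient_accumulation_steps 4, so the optimizer still sees an
    # effective batch of 4 * 16 == 64 samples per update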
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ : Optional[Any] = os.path.split(UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ : str = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(UpperCAmelCase ),
'''epoch''': epoch,
} , step=UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __UpperCamelCase ( ):
lowercase__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCAmelCase , default=UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=UpperCAmelCase , default='''logs''' , help='''Location of where to store experiment tracking logs and relevant project information''' , )
lowercase__ : str = parser.parse_args()
lowercase__ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
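# Typical invocation for the script above (flags defined in main(); the
# distributed launch configuration comes from `accelerate config`):
#   accelerate launch this_script.py --with_tracking --project_dir logs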
| 198 | 1 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
from typing import Any, Dict, Optional, Union  # needed by the annotations below
def lowerCamelCase ( _UpperCamelCase : str ) -> int:
    '''simple docstring'''
    # freeze: exclude every parameter of the given module from backprop
    for param in _UpperCamelCase.parameters():
        param.requires_grad = False
def lowerCamelCase ( ) -> Union[str, Any]:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int:
    '''simple docstring'''
    fig = plt.imshow(_UpperCamelCase )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def lowerCamelCase ( ) -> Dict:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
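# Note: the four helpers above all share one obfuscated name, so only the last
# definition (the timestamp helper) is reachable at module scope; the earlier
# ones are shadowed and would need distinct names to be used together.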
| 367 |
"""simple docstring"""
UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( data : bytes ) -> bytes:
    '''simple docstring'''
    if not isinstance(data , bytes ):
        __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(__UpperCAmelCase )
    binary_stream = """""".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"""=""" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""""""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCamelCase ( encoded_data : str ) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        __UpperCAmelCase : Tuple = (
            """argument should be a bytes-like object or ASCII string, """
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(__UpperCAmelCase )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
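# Round-trip sanity check against the stdlib. Both functions above share one
# obfuscated name, so only the *decoder* survives at module scope; we exercise
# it with a reference encoding produced by base64.b64encode:
import base64

assert lowerCamelCase(base64.b64encode(b"""data""" ) ) == b"""data"""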
| 320 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
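# With the _LazyModule indirection above, importing the package stays cheap:
# the torch-backed classes are only materialised on first attribute access,
# while the TYPE_CHECKING branch keeps static type checkers aware of them.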
| 82 |
from __future__ import annotations
import math
def is_prime( number ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums( n ):
    """simple docstring"""
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n ):
    """simple docstring"""
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count = 11 ):
    """simple docstring"""
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution():
    """simple docstring"""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 82 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : List[Any] = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def snake_case (idx :Dict ):
    '''simple docstring'''
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
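# Each pair above couples the HF CvT parameter name (left) with the matching
# key in the original checkpoint (right); the attention helper below repeats
# the same scheme per stage and per block.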
def snake_case (idx :Any , cnt :List[Any] ):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx: int):
    """Create the cls-token rename entry for stage `idx` (only stage 2 carries one)."""
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    """Create the rename pairs for the final layernorm and classification head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model: str, image_size: int, cvt_file_name: str, pytorch_dump_folder_path: str):
    """Convert a Microsoft CvT checkpoint into the Hugging Face format."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_UpperCamelCase : int = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
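# Example invocation (hedged: the script file name and checkpoint location are
# assumptions for illustration, not part of this file):
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384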
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Convert `InputExample`s into the padded `InputFeatures` consumed by the model."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross-entropy ignore_index as the padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        # roberta uses an extra separator b/w pairs of sentences
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use a padding label id that is ignored by the loss so that only real
        # label ids contribute to it.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                # roberta uses an extra separator b/w pairs of sentences
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
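# Minimal usage sketch (hedged: assumes a cased BERT tokenizer is available and
# that a TokenClassificationTask subclass would normally supply the examples):
#
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
#   example = InputExample(guid="train-1", words=["Hugging", "Face"], labels=["B-ORG", "I-ORG"])
#   features = TokenClassificationTask.convert_examples_to_features(
#       [example], ["O", "B-ORG", "I-ORG"], 16, tokenizer,
#       cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token,
#       pad_token=tokenizer.pad_token_id,
#   )
#   assert len(features[0].input_ids) == 16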
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XGLM, mimicking fairseq's vocabulary alignment."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
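# Usage sketch (hedged: requires a real SentencePiece model file on disk; the
# local path below is hypothetical):
#
#   tok = XGLMTokenizer("sentencepiece.bpe.model")
#   ids = tok("Hello world")["input_ids"]   # </s> is prepended via build_inputs_with_special_tokens
#   assert ids[0] == tok.sep_token_id
#   tok.save_vocabulary("./saved")          # copies the .bpe.model file alongside the tokenizer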
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
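# Usage sketch: the generated auto classes dispatch on the checkpoint's config
# type (network access or a local checkpoint is required):
#
#   from transformers import FlaxAutoModelForMaskedLM
#
#   model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-cased")
#   # -> FlaxBertForMaskedLM, resolved via FLAX_MODEL_FOR_MASKED_LM_MAPPING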
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
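# Worked example using the classic Vigenere test vector (key LEMON):
#
#   encrypt_message("LEMON", "ATTACKATDAWN")  # -> 'LXFOPVEFRNHR'
#   decrypt_message("LEMON", "LXFOPVEFRNHR")  # -> 'ATTACKATDAWN'
#
# Non-letters pass through unchanged, and the key index only advances on letters.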
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
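# To run just this integration test (hypothetical repository layout; RUN_SLOW is
# the usual transformers convention for enabling @slow tests):
#
#   RUN_SLOW=1 python -m pytest -k "camembert" tests/ -s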
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a key for the priority queue, and a parent pointer."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a: int, b: int, edge: int) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a list-based priority queue, O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap, O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""
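# Usage sketch for the two Prim variants (1-based vertex ids at the connect()
# API; the edge weights below are illustrative):
#
#   g = [Vertex(n) for n in range(1, 5)]
#   connect(g, 1, 2, 15)
#   connect(g, 1, 3, 12)
#   connect(g, 2, 4, 13)
#   connect(g, 3, 4, 6)
#   prim(g, g[0])             # [(child, parent), ...] MST edges
#   list(prim_heap(g, g[0]))  # same edges via the heap variant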
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Mobius function of n: 1, -1, or 0 depending on the prime factorization."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
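# Quick values of the Mobius function computed by the code above:
#
#   mobius(1)  # ->  1 (empty factorization, even length)
#   mobius(2)  # -> -1 (one prime factor)
#   mobius(4)  # ->  0 (not square-free)
#   mobius(6)  # ->  1 (two distinct prime factors)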
def solution(limit: int = 1_000_000) -> int:
    """Count n < limit for which x**2 - y**2 - z**2 == n has exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
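# Context (from the Project Euler 135 statement, not computed here): n = 1155 is
# the least value with exactly ten solutions of x**2 - y**2 - z**2 == n, and
# solution() counts how many n < 1_000_000 reach exactly ten solutions.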
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Combined resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Combined resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
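# Worked examples (values follow directly from the formulas in the docstrings):
#
#   resistor_parallel([3.21389, 2, 3])  # -> 0.8737571620498019
#   resistor_series([3.21389, 2, 3])    # -> 8.21389
#   resistor_parallel([])               # ZeroDivisionError: no resistors given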
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
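# To exercise this processor test in isolation (hypothetical repository path):
#
#   python -m pytest tests/models/tvlt/test_processor_tvlt.py -q
#
# setUp/tearDown create and remove a fresh temporary directory for each test.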
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
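# Usage sketch (illustrative, assuming the standard Auto* registration API
# exercised in the tests above, not part of the original test file):
#   AutoConfig.register("custom", CustomConfig)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained(saved_dir)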
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of elements, ordered by its top (last) element."""

    def __lt__(self, other) -> bool:
        return self[-1] < other[-1]

    def __eq__(self, other) -> bool:
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort the collection in place with patience sorting and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
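# Worked example (added for illustration): patience_sort([5, 1, 4, 2]) builds
# the piles [5, 1] and [4, 2] (each decreasing from bottom to top), then
# heap-merges the reversed piles [1, 5] and [2, 4] into [1, 2, 4, 5].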
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 215 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit max(0, x) element-wise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
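# A related variant, sketched for illustration (an assumption, not part of the
# original module): leaky ReLU keeps a small slope `alpha` for negative inputs
# instead of clamping them to zero.
def leaky_relu(vector: list[float], alpha: float = 0.01) -> np.ndarray:
    arr = np.asarray(vector)
    return np.where(arr > 0, arr, alpha * arr)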
| 258 | 0 |
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE__ = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE__ = "MIT"
SCREAMING_SNAKE_CASE__ = "1.0.0"
SCREAMING_SNAKE_CASE__ = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE__ = "[email protected]"
SCREAMING_SNAKE_CASE__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor just '#', process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registered domain, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (sub-domain included) of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape the page behind `url` and collect e-mail addresses on its links."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 149 | """simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle."""

    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
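# For contrast, a constant-memory alternative (a sketch assuming nodes are
# linked via `next_node` as above, not part of the original module): Floyd's
# tortoise-and-hare detection runs in O(n) time and O(1) space, versus the
# O(n^2) visited-list scan used by `has_loop`.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False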
| 149 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10, reduction_factor=2, return_attention_mask=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filter bank features for one waveform."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
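# Minimal usage sketch (illustrative, assuming a 1-second mono waveform):
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16000)  # log-mel labels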
| 102 | from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count the tile counts t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae (Project Euler 174)."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
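# Worked check (added for illustration): a 3x3 square with a 1x1 hole uses
# 3*3 - 1*1 = 8 tiles, so t = 8 is counted once for outer_width = 3.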
| 338 | 0 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
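# Complexity note (added for clarity): slowsort is the deliberately pessimal
# "multiply and surrender" algorithm; even its best case is super-polynomial,
# so it only serves as a teaching example. It sorts the list in place:
#   data = [3, 1, 2]; slowsort(data)  ->  data == [1, 2, 3]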
if __name__ == "__main__":
from doctest import testmod
testmod()
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
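# Note (added for clarity): _LazyModule defers the heavy torch/TF imports until
# an attribute such as `RemBertModel` is first accessed, which keeps plain
# `import transformers` fast even when optional backends are installed.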
| 181 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_lowerCamelCase , )
assert hasattr(self , '''env''' )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
SCREAMING_SNAKE_CASE : List[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_lowerCamelCase , instance_count=_lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=_lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_lowerCamelCase , py_version='''py36''' , )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
TrainingJobAnalytics(_lowerCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.create_estimator(_lowerCamelCase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _lowerCamelCase )
| 313 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __snake_case ( ) -> Optional[Any]:
A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 300 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
assert not are_progress_bars_disabled() | 351 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod() | 196 | 0 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder for the given classical inputs."""
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
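# Classical cross-check (added for illustration): a full adder computes
# sum = a XOR b XOR c_in and carry = majority(a, b, c_in), so for the inputs
# (1, 1, 1) every shot should ideally measure carry=1, sum=1, i.e. "11".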
| 43 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 114 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    discriminant = b * b - 4 * a * c

    root_1 = (-b + sqrt(discriminant)) / (2 * a)
    root_2 = (-b - sqrt(discriminant)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main() | 256 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take a string as input and return a list of (sub-word) tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 256 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_UpperCAmelCase :List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase :List[str] = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase :str = True
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :str = False
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _snake_case ( self ):
lowercase__: Tuple = TFMBartModelTester(self )
lowercase__: Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
]
_UpperCAmelCase :Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
_UpperCAmelCase :Tuple = "facebook/mbart-large-en-ro"
@cached_property
def _snake_case ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _snake_case ( self ):
lowercase__: Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _snake_case ( self , **_UpperCAmelCase ):
lowercase__: List[Any] = self.translate_src_text(**_UpperCAmelCase )
self.assertListEqual(self.expected_text , _UpperCAmelCase )
def _snake_case ( self , **_UpperCAmelCase ):
lowercase__: str = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='''tf''' )
lowercase__: Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowercase__: Tuple = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def _snake_case ( self ):
self._assert_generated_batch_equal_expected()
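

# --- Usage sketch (added; not part of the original test file) ---
# prepare_mbart_inputs_dict above derives every mask it is not given from the
# config and the id tensors, so a minimal call needs only three arguments. The
# snippet below is illustrative, not taken from the tests:
#
#   tester = TFMBartModelTester(parent=None)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#   sorted(inputs_dict)  # ['attention_mask', 'cross_attn_head_mask',
#                        #  'decoder_attention_mask', 'decoder_head_mask',
#                        #  'decoder_input_ids', 'head_mask', 'input_ids']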
| 177 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, crop_pct=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
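

# --- Usage sketch (added; not part of the original file). The class name above
# was restored from context; the image shape below is an illustrative assumption. ---
if __name__ == "__main__":
    demo_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    demo_processor = PoolFormerImageProcessor()
    demo_batch = demo_processor(images=demo_image)
    # default pipeline: resize the shortest edge to int(224 / 0.9), then center
    # crop to 224x224, rescale to [0, 1] and normalize with ImageNet mean/std
    print(demo_batch["pixel_values"][0].shape)  # (3, 224, 224)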
| 32 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local", )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        # note: reads the module-level `args` parsed in the __main__ block
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
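
# --- Example invocation (added for illustration; the script name and bucket
# paths below are hypothetical, not from the original) ---
#
#   python run_mlm.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/validation \
#       --output_dir gs://my-bucket/model \
#       --bfloat16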
| 32 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
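

# --- Worked example (added; not part of the original solution) ---
# The decryption relies on XOR being its own inverse: x ^ k ^ k == x, so
# re-applying the key to the ciphertext recovers the plaintext. The key and
# plaintext below are hypothetical:
if __name__ == "__main__":
    demo_plain = "the quick brown fox"
    demo_key = (ord("a"), ord("b"), ord("c"))
    demo_cipher = [ord(char) ^ keychar for char, keychar in zip(demo_plain, cycle(demo_key))]
    assert try_key(demo_cipher, demo_key) == demo_plain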
| 217 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
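
# --- Derivation note (added for clarity) ---
# The magic multipliers encode concatenated products (Project Euler 38):
# for a 4-digit n in 5000..9999, 2n has 5 digits, so n concatenated with 2n
# is n * 10**5 + 2n = 100002 * n (9 digits); for a 3-digit n in 100..333,
# 2n and 3n each have 3 digits, so the concatenation of n, 2n and 3n is
# n * 10**6 + 2n * 10**3 + 3n = 1002003 * n.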
| 217 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
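

# --- Usage sketch (added; not part of the original file). Shapes are
# illustrative; inputs are NHWC as the modules above expect. ---
#
#   rng = jax.random.PRNGKey(0)
#   down = FlaxDownsample2D(out_channels=8)
#   params = down.init(rng, jnp.zeros((1, 16, 16, 4)))
#   out = down.apply(params, jnp.zeros((1, 16, 16, 4)))  # shape (1, 8, 8, 8)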
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 353 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 33 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
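

# --- Sanity check (added; not part of the original solution) ---
# Hand-computed values of the recurrence for small rows (black unit squares
# plus tiles of length 2..4, i.e. Project Euler 117): f(0)..f(5) = 1, 1, 2, 4, 8, 15.
if __name__ == "__main__":
    assert [solution(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]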
| 281 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ], )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 281 | 1 |
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
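

# --- Alternative sketch (added; not part of the original solution) ---
# The same last ten digits can be computed without big integers by working
# modulo 10**10 with three-argument pow; zfill guards against leading zeros:
def solution_mod() -> str:  # hypothetical helper, not in the original file
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)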
| 71 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 71 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET + '_')[-1].split('_')
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
    )
    parser.add_argument(
        '--lora_prefix_text_encoder',
        default='lora_te',
        type=str,
        help='The prefix of text encoder weight in safetensors',
    )
    parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
    parser.add_argument(
        '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
    )
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
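
# --- Example invocation (added for illustration; the script name and all
# paths below are hypothetical) ---
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75 --device cpu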
| 266 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/krishnap25/mauve', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                } ), codebase_urls=['https://github.com/krishnap25/mauve'], reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 266 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", ROBERTA_START_DOCSTRING, )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", ROBERTA_START_DOCSTRING, )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an early exit fired inside the encoder; its payload replaces the full forward pass
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
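# Smoke-test sketch (added; the tiny config below is illustrative, not a
# released checkpoint — it only shows how the classes above fit together):
#
#   import torch
#   config = RobertaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=4,
#                          num_attention_heads=4, intermediate_size=64, num_labels=2)
#   model = DeeRobertaForSequenceClassification(config).eval()
#   outputs = model(input_ids=torch.randint(0, 100, (1, 12)))
#   logits = outputs[0]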
| 357 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """Read key (gold) and system lines for a single document and build the
    cluster/mention structures the CoVal evaluator consumes."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}')
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}')

    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively')

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run every requested metric and collect recall/precision/F1 plus the averaged CoNLL score."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})

        logger.info(
            name.ljust(10), f'Recall: {recall * 100:.2f}', f' Precision: {precision * 100:.2f}', f' F1: {f1 * 100:.2f}', )

    if conll_subparts_num == 3:
        # the CoNLL score is the mean of the MUC, B-cubed and CEAFe F1 values
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({'conll_score': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse information in column 6."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                }), codebase_urls=['https://github.com/ns-moosavi/coval'], reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ], )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )

        return score
| 185 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" CamemBERT tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
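# Usage sketch (added; assumes the "camembert-base" checkpoint is reachable):
#
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   encoded = tokenizer("J'aime le camembert !", return_tensors="pt")
#   # sequence pairs are wrapped as <s> A </s></s> B </s>:
#   pair_ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])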
| 263 |
"""simple docstring"""
import argparse

import torch
from omegaconf import OmegaConf  # the pip package installs the module as lowercase "omegaconf"

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE, stripping the "first_stage_model." prefix
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]

    # extract state_dict for UNetLDM, stripping the "model.diffusion_model." prefix
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='scaled_linear', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
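# Example invocation (script name and paths are placeholders):
#   python convert_ldm.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline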
| 263 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
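# Note (added): the CPU paths exercise the metrics script in-process via
# `debug_launcher`, while the multi-GPU path re-launches the same script under
# `torchrun` with one process per visible CUDA device.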
| 367 |
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    """
    Apply the logistic function 1 / (1 + e^-x) element-wise to a numpy array.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
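# Added sketch (not in the original): 1 / (1 + np.exp(-x)) overflows np.exp for
# large negative inputs and numpy emits a RuntimeWarning. The tanh identity
# sigmoid(x) = 0.5 * (1 + tanh(x / 2)) gives an equivalent, numerically stable form.
def stable_sigmoid(vector):
    """Numerically stable sigmoid via the tanh identity."""
    return 0.5 * (1 + np.tanh(vector / 2))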
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
| 40 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
    'Minangkabau Arabic': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
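# Usage sketch (added; instantiating the tool downloads the NLLB checkpoint on
# first use, and PipelineTool.__call__ chains encode -> forward -> decode above):
#
#   translator = TranslationTool()
#   translator("How are you?", src_lang="English", tgt_lang="French")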
| 115 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            'past_values': past_values,
            'static_categorical_features': static_categorical_features,
            'past_time_features': past_time_features,
            'past_observed_mask': past_observed_mask,
            'future_time_features': future_time_features,
            'future_values': future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['missing_keys'], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason='Model has no tokens embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, 'forward'))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                'past_values',
                'past_time_features',
                'past_observed_mask',
                'static_categorical_features',
                'static_real_features',
                'future_values',
                'future_time_features',
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('future_observed_mask')

            expected_arg_names.extend(
                [
                    'decoder_attention_mask',
                    'head_mask',
                    'decoder_head_mask',
                    'cross_attn_head_mask',
                    'encoder_outputs',
                    'past_key_values',
                    'output_hidden_states',
                    'output_attentions',
                    'use_cache',
                    'return_dict',
                ])

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        d_model = getattr(self.model_tester, 'd_model', None)
        num_attention_heads = getattr(self.model_tester, 'num_attention_heads', None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of the tourism-monthly dataset from the Hub."""
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch', filename=filename, repo_type='dataset')
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch['past_values'], past_time_features=batch['past_time_features'], past_observed_mask=batch['past_observed_mask'], static_categorical_features=batch['static_categorical_features'], future_values=batch['future_values'], future_time_features=batch['future_time_features'], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'], past_time_features=batch['past_time_features'], past_observed_mask=batch['past_observed_mask'], static_categorical_features=batch['static_categorical_features'], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'], past_time_features=batch['past_time_features'], past_values=batch['past_values'], future_time_features=batch['future_time_features'], past_observed_mask=batch['past_observed_mask'], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
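# Note (added): AutoformerForPrediction.generate() draws `num_parallel_samples`
# trajectories per series from the predicted distribution, so a point forecast
# is typically taken as the mean over the sample dimension, as done above.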
| 24 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1); swap coordinates otherwise
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 24 | 1 |
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two 2D points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Recursively draw Sierpinski triangles with turtle graphics down to `depth`."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
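# Added note (not in the original): each call draws one outline and then
# recurses three times, so a run at depth d draws (3**(d + 1) - 1) / 2
# triangle outlines in total.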
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 310 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Processor that wraps an image processor and a tokenizer for Donut-style models."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
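    # Added illustration: token2json("<s_menu><s_name>Latte</s_name></s_menu>")
    # yields {"menu": {"name": "Latte"}} -- nested <s_key>...</s_key> tags become
    # nested dicts, and <sep/>-separated leaves become lists.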
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 120 | 0 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
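    # Added check: for 0/1 inputs the gate agrees with Python's bitwise OR.
    assert all(or_gate(a, b) == (a | b) for a in (0, 1) for b in (0, 1))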
| 121 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 121 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (a.k.a. swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
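    # Illustrative run (added): SiLU is 0 at the origin and close to the
    # identity for large positive inputs.
    demo = np.array([-1.0, 0.0, 1.0, 10.0])
    print("sigmoid:", sigmoid(demo))
    print("silu   :", sigmoid_linear_unit(demo))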
| 12 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_A : int = """CompVis/stable-diffusion-v1-1"""
_A : Any = """CompVis/stable-diffusion-v1-2"""
_A : Optional[int] = """CompVis/stable-diffusion-v1-3"""
_A : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
class a__ ( a_ ):
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a = True , ):
super()._init_()
lowercase : Optional[Any] = StableDiffusionPipeline.from_pretrained(_a )
lowercase : str = StableDiffusionPipeline.from_pretrained(_a )
lowercase : Dict = StableDiffusionPipeline.from_pretrained(_a )
lowercase : Union[str, Any] = StableDiffusionPipeline(
vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , requires_safety_checker=_a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    # Each helper delegates to the corresponding checkpoint's pipeline; any
    # generation arguments (height, width, num_inference_steps, ...) pass through.
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe1(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe2(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe3(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe4(prompt=prompt, **kwargs)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
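# Usage sketch (added; assumes this file is distributed as the diffusers
# community pipeline "stable_diffusion_comparison" -- adjust the name if yours
# differs):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe("a photo of an astronaut riding a horse")
#
# The returned StableDiffusionPipelineOutput holds one image per checkpoint.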
| 202 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
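    # Note (added): _LazyModule defers importing the torch-backed modules above
    # until an attribute such as WavLMModel is first accessed, keeping the
    # initial `import transformers` cheap when WavLM is never used.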
| 114 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
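# Added note on the case split above: Project Euler 180 only needs
# n in {-2, -1, 1, 2}, because Fermat's Last Theorem rules out rational
# solutions of x**n + y**n = z**n for |n| >= 3.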
if __name__ == "__main__":
print(f'''{solution() = }''')
| 114 | 1 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic primality test using `prec` random Miller-Rabin witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
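# Added note: Carmichael numbers such as 561 = 3 * 11 * 17 fool the plain
# Fermat test for every coprime base, but the squaring loop above (tracking b
# through the 2**exp doublings) still exposes them as composite.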
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 108 |
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
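if __name__ == "__main__":
    # Worked example (added, not in the original file): by Graham's law,
    # hydrogen (M ~ 2.016 g/mol) effuses roughly 4x faster than oxygen
    # (M ~ 31.998 g/mol), since sqrt(31.998 / 2.016) ~ 3.98.
    print(effusion_ratio(2.016, 31.998))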
| 183 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
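    # Worked example (added): with the default upsampling_ratios [8, 5, 4, 2]
    # the hop length is 8 * 5 * 4 * 2 = 320 samples, so at 24 kHz the frame
    # rate below is ceil(24000 / 320) = 75 frames per second.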
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10)) | 284 | from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two arrays' combined, sorted values."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") | 284 | 1 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number holding the largest base**exponent value."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
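# Worked example (added): comparing via logarithms avoids evaluating huge
# integers. For 2**11 vs 3**7: 11 * log10(2) ~ 3.311 < 7 * log10(3) ~ 3.340,
# so 3**7 (= 2187) exceeds 2**11 (= 2048).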
if __name__ == "__main__":
print(solution())
| 41 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
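# Added shape note: timm stores q, k and v stacked as a single (3*hidden, hidden)
# qkv matrix, so the slices above peel off rows [:h], [h:2h] and [-h:] to give
# each HF attention projection its own (hidden, hidden) weight.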
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 41 | 1 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
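# Worked example (added): for num = 221 = 13 * 17, num - 1 = 220 = 55 * 2**2,
# so s = 55 and t = 2 above; almost every random witness a then escapes the
# {1, 220} residue cycle and proves 221 composite.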
def is_prime_low_num(num: int) -> bool:
    """Quick check against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with approximately `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 357 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Produce the Gray code sequence for the given number of bits."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2**n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
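    # Illustrative run (added): the 2-bit Gray code is [0, 1, 3, 2], i.e.
    # binary 00, 01, 11, 10 -- consecutive entries differ in exactly one bit.
    print(gray_code(2))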
| 0 | 0 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
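    # Added check: 2x + y = 4 and x + 2y = 5 give x = 1, y = 2.
    print(solve_simultaneous([[2, 1, 4], [1, 2, 5]]))  # [1.0, 2.0]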
| 78 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")
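# Rough strength estimate (added): the full charset has 26 + 26 + 10 + 32 = 94
# symbols, so an 8-character password drawn uniformly from it carries about
# 8 * log2(94) ~ 52 bits of entropy.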
if __name__ == "__main__":
main()
| 78 | 1 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
UpperCAmelCase_ : Union[str, Any] = tokenizer('m xxx ɪ' , do_phonemize=_UpperCamelCase ).input_ids
self.assertEqual(_UpperCamelCase , [1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
UpperCAmelCase_ : int = tokenizer('m aaa ɪ ccc' , do_phonemize=_UpperCamelCase ).input_ids
self.assertEqual(_UpperCamelCase , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase_ : List[str] = tokenizer('maɪ c' , do_phonemize=_UpperCamelCase ).input_ids
self.assertEqual(_UpperCamelCase , [3, 2_0_0] ) # mai should be <unk> (=3)
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase_ : Tuple = 'Hello how are you'
UpperCAmelCase_ : Any = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
self.assertEqual(_UpperCamelCase , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase_ : Optional[int] = 'Hello how are you'
UpperCAmelCase_ : Dict = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_UpperCamelCase ).input_ids , tokenizer(_UpperCamelCase , do_phonemize=_UpperCamelCase ).input_ids )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase_ : int = 'Hello how are you'
UpperCAmelCase_ : str = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(tokenizer(_UpperCamelCase ).input_ids )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase_ : List[str] = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
UpperCAmelCase_ : List[Any] = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , batch_tokens[0] )
self.assertEqual(_UpperCamelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase_ : Any = 'Hello how are you'
UpperCAmelCase_ : int = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
self.assertEqual(_UpperCamelCase , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase_ : Dict = 'Hello how are you'
UpperCAmelCase_ : Dict = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_UpperCamelCase ).input_ids , tokenizer(_UpperCamelCase , do_phonemize=_UpperCamelCase ).input_ids )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
UpperCAmelCase_ : Any = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase_ : List[str] = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ : Dict = tokenizer.batch_decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , batch_tokens[0] )
self.assertEqual(_UpperCamelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
UpperCAmelCase_ : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer.batch_decode(_UpperCamelCase , filter_word_delimiter_token=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , batch_tokens[0] )
self.assertEqual(_UpperCamelCase , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase_ : Any = 'Hello how are you'
UpperCAmelCase_ : Optional[int] = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(tokenizer(_UpperCamelCase ).input_ids , filter_word_delimiter_token=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase_ : int = 'Hello how are you'
UpperCAmelCase_ : Union[str, Any] = tokenizer.phonemize(_UpperCamelCase , phonemizer_lang='en-us' )
UpperCAmelCase_ : int = tokenizer.decode(tokenizer(_UpperCamelCase ).input_ids , filter_word_delimiter_token=_UpperCamelCase )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , _UpperCamelCase )
def test_change_phonemizer_lang(self ) -> Union[str, Any]:
tokenizer = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=None )
input_text = 'Hello how are you'
input_ids_en = tokenizer(input_text , phonemizer_lang='en-us' ).input_ids
input_ids_fr = tokenizer(input_text , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(input_ids_en , input_ids_fr )
text_en = tokenizer.decode(input_ids_en )
text_fr = tokenizer.decode(input_ids_fr )
self.assertEqual(text_en , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(text_fr , 'ɛ l o h aʊ a ʁ j u' )
def test_case_insensitive(self ) -> Any:
tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
input_text_up = 'Hello how Are you'
input_text_low = 'hello how are you'
input_ids_up = tokenizer(input_text_up ).input_ids
input_ids_low = tokenizer(input_text_low ).input_ids
self.assertEqual(input_ids_up , input_ids_low )
def test_tokenizer_decode_added_tokens(self ) -> List[Any]:
tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
batch_tokens = tokenizer.batch_decode(sample_ids )
self.assertEqual(batch_tokens , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def get_from_offsets(offsets , key ) -> List[Any]:
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def test_offsets(self ) -> List[str]:
tokenizer = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def test_offsets_batch(self ) -> Optional[int]:
tokenizer = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(outputs_batch , outputs_list ):
self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
# transform list to ModelOutput
outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(list_or_dict_a , list_or_dict_b ):
if isinstance(list_or_dict_a , list ):
[recursive_check(la , lb ) for la, lb in zip(list_or_dict_a , list_or_dict_b )]
self.assertEqual(list_or_dict_a , list_or_dict_b )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
outputs_char_batch = tokenizer.batch_decode(sample_ids , output_char_offsets=True )
outputs_char = [tokenizer.decode(ids , output_char_offsets=True ) for ids in sample_ids]
check_list_tuples_equal(outputs_char_batch , outputs_char )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def __UpperCAmelCase ( self ) -> str:
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def test_add_tokens_tokenizer(self ) -> List[Any]:
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer )
self.assertNotEqual(vocab_size , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
added_toks = tokenizer.add_tokens(new_toks )
vocab_size_a = tokenizer.vocab_size
all_size_a = len(tokenizer )
self.assertNotEqual(vocab_size_a , 0 )
self.assertEqual(vocab_size , vocab_size_a )
self.assertEqual(added_toks , len(new_toks ) )
self.assertEqual(all_size_a , all_size + len(new_toks ) )
tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=False )
self.assertGreaterEqual(len(tokens ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
new_toks_a = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
added_toks_a = tokenizer.add_special_tokens(new_toks_a )
vocab_size_b = tokenizer.vocab_size
all_size_b = len(tokenizer )
self.assertNotEqual(vocab_size_b , 0 )
self.assertEqual(vocab_size , vocab_size_b )
self.assertEqual(added_toks_a , len(new_toks_a ) )
self.assertEqual(all_size_b , all_size_a + len(new_toks_a ) )
tokens = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=False )
self.assertGreaterEqual(len(tokens ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def __UpperCAmelCase ( self ) -> List[str]:
pass
def test_convert_tokens_to_string_format(self ) -> List[Any]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
tokens = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
output = tokenizer.convert_tokens_to_string(tokens )
self.assertIsInstance(output['text'] , str )
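# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the decode tests rely
# on CTC-style collapsing, where repeated ids are merged and pad ids dropped
# before ids are mapped back to phonemes. A minimal stand-alone version of that
# collapsing step, with a hypothetical PAD_ID, could look like this:
#
#     import itertools
#
#     PAD_ID = 0  # assumed pad token id, for illustration only
#
#     def ctc_collapse(ids):
#         deduped = [k for k, _ in itertools.groupby(ids)]  # merge consecutive duplicates
#         return [i for i in deduped if i != PAD_ID]        # then drop pad ids
#
#     ctc_collapse([11, 5, 5, 5, 15, 15, 0, 15]) == [11, 5, 15, 15]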
| 359 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizer(PreTrainedTokenizer ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['input_ids', 'attention_mask']
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__( self , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=None , **_UpperCamelCase , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
UpperCAmelCase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenizer_file=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
UpperCAmelCase_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase_ : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[int] = len(self.sp_model )
UpperCAmelCase_ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCamelCase )
}
UpperCAmelCase_ : int = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_ : Any = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase_ : Any = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , d ) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def vocab_size( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def src_lang( self ) -> str:
return self._src_lang
@src_lang.setter
def src_lang( self , new_src_lang ) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
prefix_ones = [1] * len(self.prefix_tokens )
suffix_ones = [1] * len(self.suffix_tokens )
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def get_vocab( self ) -> Dict:
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _tokenize( self , text ) -> List[str]:
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token( self , index ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self , tokens ) -> str:
out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , 'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ) -> BatchEncoding:
UpperCAmelCase_ : Union[str, Any] = src_lang
UpperCAmelCase_ : Dict = tgt_lang
return super().prepare_seqaseq_batch(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self , src_lang ) -> None:
self.cur_lang_code = self.lang_code_to_id[src_lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
def set_tgt_lang_special_tokens( self , lang ) -> None:
self.cur_lang_code = self.lang_code_to_id[lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
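# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes the Hub checkpoint below is reachable;
# not executed as part of this module):
#
#     tokenizer = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Plan", return_tensors="pt")
#     # Source text is suffixed with </s> plus the language code, which is what
#     # set_src_lang_special_tokens() above arranges via suffix_tokens.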
| 145 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results ):
"""Parse failed/passed counts and the time spent from a pytest summary line."""
expressions = test_results.split(" " )
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure(failures_short_lines ):
"""Map each failing doctest to the first line of its error message."""
failures = {}
failure = None
in_error = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , line ):
in_error = True
failure = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
failures[failure] = line
in_error = False
return failures
class Message:
def __init__(self , title: str , doc_test_results: Dict ):
self.title = title
self._time_spent = doc_test_results["time_spent"].split("," )[0]
self.n_success = doc_test_results["success"]
self.n_failures = doc_test_results["failures"]
self.n_tests = self.n_success + self.n_failures
# Failures and success of the modeling tests
self.doc_test_results = doc_test_results
@property
def time(self ) -> str:
time_spent = [self._time_spent]
total_secs = 0
for time in time_spent:
time_parts = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts ) == 1:
time_parts = [0, 0, time_parts[0]]
hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours )}h{int(minutes )}m{int(seconds )}s"
@property
def header(self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def category_failures(self ):
line_length = 40
category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v , dict )}
report = ""
for category, failures in category_failures.items():
if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def payload(self ):
blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(blocks )
@staticmethod
def error_out( ):
payload = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(UpperCamelCase_ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase_ , )
def post(self ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
def get_reply_blocks(self , job_name , job_link , failures , text ):
failures_text = ""
for key, value in failures.items():
value = value[:200] + " [Truncated]" if len(value ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
title = job_name
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def post_reply(self ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
job_link = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
sorted_dict = sorted(self.doc_test_results.items() , key=lambda t: t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
text = F'*Num failures* :{len(job_result["failed"] )} \n'
failures = job_result["failures"]
blocks = self.get_reply_blocks(job , job_link , failures , text=text )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links():
"""Extract job names and their URLs from the current GitHub Actions workflow run."""
run_id = os.environ["GITHUB_RUN_ID"]
url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
result = requests.get(url ).json()
jobs = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , e )
return {}
def retrieve_artifact(name ):
"""Read every file of a downloaded artifact directory into a dict keyed by file stem."""
_artifact = {}
if os.path.exists(name ):
files = os.listdir(name )
for file in files:
try:
with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
_artifact[file.split("." )[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'Could not open {os.path.join(name , file )}.' ) from e
return _artifact
def retrieve_available_artifacts():
"""List artifact directories available in the current working directory."""
class Artifact:
def __init__(self , name: str ):
self.name = name
self.paths = []
def __str__(self ):
return self.name
def add_path(self , path: str ):
self.paths.append({"name": self.name, "path": path} )
_available_artifacts = {}
directories = filter(os.path.isdir , os.listdir() )
for directory in directories:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name )
_available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact['stats'])
doc_test_results['failures'] = failed
doc_test_results['success'] = success
doc_test_results['time_spent'] = time_spent[1:-1] + ', '
all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
line = line.replace('FAILED ', '')
line = line.split()[0].replace('\n', '')
if "::" in line:
file_path, test = line.split('::')
else:
file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure = all_failures[test] if test in all_failures else 'N/A'
doc_test_results[category]['failures'][test] = failure
break
message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
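# ---------------------------------------------------------------------------
# Illustrative sketch of what handle_test_results parses. Given the trailing
# pytest summary line "= 2 failed, 30 passed in 63.41s =", it returns
# (2, 30, "63.41s"):
#
#     failed, success, time_spent = handle_test_results("= 2 failed, 30 passed in 63.41s =")
#     assert (failed, success, time_spent) == (2, 30, "63.41s")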
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 45 | 0 |
"""simple docstring"""
import numpy as np
class _A :
"""simple docstring"""
def __init__(self):
self.position = (0, 0)
self.parent = None
self.g = 0
self.h = 0
self.f = 0
def __eq__(self , cell):
return self.position == cell.position
def showcell(self):
print(self.position)
class _A :
"""simple docstring"""
def __init__(self , world_size=(5, 5)):
self.w = np.zeros(world_size)
self.world_x_limit = world_size[0]
self.world_y_limit = world_size[1]
def show(self):
print(self.w)
def get_neigbours(self , cell):
neughbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
current_x = cell.position[0]
current_y = cell.position[1]
neighbours = []
for n in neughbour_cord:
x = current_x + n[0]
y = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
c = Cell()
c.position = (x, y)
c.parent = cell
neighbours.append(c)
return neighbours
def astar(world , start , goal ):
"""A* search from `start` to `goal` on `world`; returns the path as a list of positions."""
_open = []
_closed = []
_open.append(start )
while _open:
min_f = np.argmin([n.f for n in _open] )
current = _open[min_f]
_closed.append(_open.pop(min_f ) )
if current == goal:
break
for n in world.get_neigbours(current ):
for c in _closed:
if c == n:
continue
n.g = current.g + 1
x1, y1 = n.position
x2, y2 = goal.position
n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
n.f = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(n )
path = []
while current.parent is not None:
path.append(current.position )
current = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
world = Gridworld()
# Start position and goal
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(f'''path from {start.position} to {goal.position}''')
s = astar(world, start, goal)
# Just for visual reasons.
for i in s:
world.w[i] = 1
print(world.w)
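# ---------------------------------------------------------------------------
# Note on the heuristic used in astar(): n.h is the *squared* Euclidean
# distance (y2 - y1) ** 2 + (x2 - x1) ** 2 while each step costs 1, so h can
# overestimate the true remaining cost. Strictly speaking the search is
# therefore not admissible A* and may return suboptimal paths on larger grids;
# on this 5x5 example it still finds a valid diagonal path.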
| 226 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowercase = frozenset(["""prompt""", """negative_prompt"""])
__lowercase = frozenset([])
__lowercase = frozenset(["""image"""])
__lowercase = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""image"""])
__lowercase = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowercase = frozenset(["""prompt""", """image""", """negative_prompt"""])
__lowercase = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowercase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
__lowercase = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""image""", """mask_image"""])
__lowercase = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""example_image""", """image""", """mask_image"""])
__lowercase = frozenset(["""class_labels"""])
__lowercase = frozenset(["""class_labels"""])
__lowercase = frozenset(["""batch_size"""])
__lowercase = frozenset([])
__lowercase = frozenset(["""batch_size"""])
__lowercase = frozenset([])
__lowercase = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowercase = frozenset(["""prompt""", """negative_prompt"""])
__lowercase = frozenset(["""input_tokens"""])
__lowercase = frozenset(["""input_tokens"""])
| 226 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list ):
if not postfix_notation:
return 0
operations = {'+', '-', '*', '/'}
stack: list[Any] = []
for token in postfix_notation:
if token in operations:
b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
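# ---------------------------------------------------------------------------
# Worked example: "5 6 9 * +" in postfix notation is 5 + (6 * 9) = 59.
#
#     assert evaluate_postfix(["5", "6", "9", "*", "+"]) == 59
#
# Note the division branch rounds toward zero for mixed-sign operands
# (a // b + 1 when a * b < 0 and b does not divide a), unlike plain floor
# division.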
| 27 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
filename = tmp_path / 'file.csv'
data = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(filename , 'w' ) as f:
f.write(data )
return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
filename = tmp_path / 'malformed_file.csv'
data = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(filename , 'w' ) as f:
f.write(data )
return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
filename = tmp_path / 'csv_with_image.csv'
data = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(filename , 'w' ) as f:
f.write(data )
return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
filename = tmp_path / 'csv_with_label.csv'
data = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(filename , 'w' ) as f:
f.write(data )
return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
filename = tmp_path / 'csv_with_int_list.csv'
data = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(filename , 'w' ) as f:
f.write(data )
return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(ValueError , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(malformed_csv_file ) in record.message
for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
with open(csv_file_with_image , encoding='utf-8' ) as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
generator = csv._generate_tables([[csv_file_with_image]] )
pa_table = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
generated_content = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
with open(csv_file_with_label , encoding='utf-8' ) as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
generator = csv._generate_tables([[csv_file_with_label]] )
pa_table = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
generated_content = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
csv = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x: [int(i ) for i in x.split()]} )
generator = csv._generate_tables([[csv_file_with_int_list]] )
pa_table = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
generated_content = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
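# ---------------------------------------------------------------------------
# Minimal stand-alone reproduction of the int_list conversion exercised above
# (`path` is a placeholder for any CSV file with an int_list column):
#
#     csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
#     pa_table = pa.concat_tables(table for _, table in csv._generate_tables([[path]]))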
| 27 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
"""Abstract base class every transformers CLI subcommand implements."""
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser ):
"""Attach this command's sub-parser and arguments to `parser`."""
raise NotImplementedError()
@abstractmethod
def run(self ):
"""Execute the command."""
raise NotImplementedError()
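# ---------------------------------------------------------------------------
# Minimal sketch of a concrete subcommand built on the base class above. The
# command name and behaviour are invented for illustration; real commands
# (e.g. `transformers-cli download`) follow the same register/run shape:
#
#     class EchoCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             echo_parser = parser.add_parser("echo")
#             echo_parser.add_argument("text", type=str)
#             echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))
#
#         def __init__(self, text: str):
#             self._text = text
#
#         def run(self):
#             print(self._text)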
| 37 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig ):
"""Configuration class for FNet models (see https://huggingface.co/google/fnet-base)."""
model_type = 'fnet'
def __init__( self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
self.tpu_short_seq_length = tpu_short_seq_length
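# ---------------------------------------------------------------------------
# Illustrative usage (default values reproduce the google/fnet-base config):
#
#     config = FNetConfig()                                     # vocab_size=32000, hidden_size=768, ...
#     small = FNetConfig(num_hidden_layers=4, hidden_size=256)  # a scaled-down variant for testing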
| 37 | 1 |
'''simple docstring'''
class CircularQueue:
"""Circular FIFO queue with a fixed capacity, backed by a Python list."""
def __init__(self , n: int ):
self.n = n
self.array = [None] * self.n
self.front = 0 # index of the first element
self.rear = 0
self.size = 0
def __len__(self ):
return self.size
def is_empty(self ):
return self.size == 0
def first(self ):
return False if self.is_empty() else self.array[self.front]
def enqueue(self , data ):
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
self.array[self.rear] = data
self.rear = (self.rear + 1) % self.n
self.size += 1
return self
def dequeue(self ):
if self.size == 0:
raise Exception("UNDERFLOW" )
temp = self.array[self.front]
self.array[self.front] = None
self.front = (self.front + 1) % self.n
self.size -= 1
return temp
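# ---------------------------------------------------------------------------
# Example round trip through the ring buffer:
#
#     q = CircularQueue(3)
#     q.enqueue("a").enqueue("b")   # enqueue returns self, so calls chain
#     assert q.first() == "a"
#     assert q.dequeue() == "a"
#     assert len(q) == 1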
| 35 |
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def get_indent(line: str ) -> str:
search = _re_indent.search(line )
return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key ):
def _inner(x ):
return key(x ).lower().replace("_" , "" )
return _inner
def sort_objects(objects , key=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(x ):
return x
if key is None:
key = noop
# Constants are all uppercase, they go first.
constants = [obj for obj in objects if key(obj ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
# Functions begin with a lowercase, they go last.
functions = [obj for obj in objects if not key(obj )[0].isupper()]
key1 = ignore_underscore(key )
return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement: str ) -> str:
# This inner function sort imports between [ ].
def _replace(match ):
imports = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
keys = [part.strip().replace('"' , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
lines = import_statement.split("\n" )
if len(lines ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
#     "object1",
#     "object2",
#     ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == "[" else 1
keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
sorted_indices = sort_objects(keys_to_sort , key=lambda x: x[1] )
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lines ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
#     "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lines[1] = _re_bracket_content.sub(_replace , lines[1] )
else:
keys = [part.strip().replace('"' , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1] ) + ", ".join([f'"{k}"' for k in sort_objects(keys )] )
return "\n".join(lines )
else:
# Finally we have to deal with imports fitting on one line
import_statement = _re_bracket_content.sub(_replace , import_statement )
return import_statement
def sort_imports(file , check_only=True ):
with open(file , encoding="utf-8" ) as f:
code = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(main_blocks ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split("\n" )
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines )
else:
line_idx += 1
if line_idx >= len(block_lines ):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = "\n".join(block_lines[line_idx:-1] )
indent = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x: x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
count = 0
reorderded_blocks = []
for i in range(len(internal_blocks ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(block )
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(main_blocks ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(file , "w" , encoding="utf-8" ) as f:
f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
if result:
failures = [os.path.join(root , "__init__.py" )]
if len(failures ) > 0:
raise ValueError(f"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__a = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
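# ---------------------------------------------------------------------------
# Typical invocations, mirroring how the transformers Makefile drives this
# script:
#
#     python utils/custom_init_isort.py                # rewrite __init__.py files in place
#     python utils/custom_init_isort.py --check_only   # raise if anything would change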
| 35 | 1 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
"""Resize/normalize `image` into a [-1, 1] tensor of shape (batch, 3, h, w)."""
if isinstance(image , torch.Tensor ):
return image
elif isinstance(image , PIL.Image.Image ):
image = [image]
if isinstance(image[0] , PIL.Image.Image ):
image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
image = np.concatenate(image , axis=0 )
image = np.array(image ).astype(np.float32 ) / 255.0
image = image.transpose(0 , 3 , 1 , 2 )
image = 2.0 * image - 1.0
image = torch.from_numpy(image )
elif isinstance(image[0] , torch.Tensor ):
image = torch.cat(image , dim=0 )
return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
"""Spherical linear interpolation between two (torch or numpy) vectors."""
inputs_are_torch = False
if not isinstance(v0 , np.ndarray ):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
if np.abs(dot ) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot )
sin_theta_0 = np.sin(theta_0 )
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t )
s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2 ).to(input_device )
return v2
def spherical_dist_loss(x , y ):
x = F.normalize(x , dim=-1 )
y = F.normalize(y , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
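# ---------------------------------------------------------------------------
# Quick sanity check for slerp() above: the interpolation endpoints recover the
# inputs (up to float precision), e.g. with two random latents:
#
#     v0, v1 = torch.randn(4), torch.randn(4)
#     assert torch.allclose(slerp(0.0, v0, v1), v0, atol=1e-5)
#     assert torch.allclose(slerp(1.0, v0, v1), v1, atol=1e-5)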
def set_requires_grad(model , value ):
for param in model.parameters():
param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline ):
def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ):
super().__init__()
self.register_modules(
vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
self.feature_extractor_size = (
feature_extractor.size
if isinstance(feature_extractor.size , int )
else feature_extractor.size["shortest_edge"]
)
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , False )
set_requires_grad(self.clip_model , False )
def snake_case_ (self , __a = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def snake_case_ (self ) -> Optional[int]:
self.enable_attention_slicing(__a )
def freeze_vae(self ):
set_requires_grad(self.vae , False )
def unfreeze_vae(self ):
set_requires_grad(self.vae , True )
def freeze_unet(self ):
set_requires_grad(self.unet , False )
def unfreeze_unet(self ):
set_requires_grad(self.unet , True )
def snake_case_ (self , __a , __a , __a ) -> str:
# get the original timestep using init_timestep
UpperCamelCase = min(int(num_inference_steps * strength ) , __a )
UpperCamelCase = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
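    # Worked example (added for clarity): with num_inference_steps=50 and strength=0.6,
    # init_timestep = min(int(50 * 0.6), 50) = 30 and t_start = max(50 - 30, 0) = 20,
    # so denoising runs over the last 30 scheduler timesteps. A higher strength applies
    # more of the schedule and therefore changes the input image more aggressively.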
def snake_case_ (self , __a , __a , __a , __a , __a , __a=None ) -> Tuple:
if not isinstance(__a , torch.Tensor ):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(__a )}" )
UpperCamelCase = image.to(device=__a , dtype=__a )
if isinstance(__a , __a ):
UpperCamelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__a )
]
UpperCamelCase = torch.cat(__a , dim=0 )
else:
UpperCamelCase = self.vae.encode(__a ).latent_dist.sample(__a )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase = 0.18215 * init_latents
UpperCamelCase = init_latents.repeat_interleave(__a , dim=0 )
UpperCamelCase = randn_tensor(init_latents.shape , generator=__a , device=__a , dtype=__a )
# get latents
UpperCamelCase = self.scheduler.add_noise(__a , __a , __a )
UpperCamelCase = init_latents
return latents
def snake_case_ (self , __a ) -> Union[str, Any]:
UpperCamelCase = self.coca_transform(__a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def snake_case_ (self , __a , __a ) -> Union[str, Any]:
UpperCamelCase = self.feature_extractor.preprocess(__a )
UpperCamelCase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase = self.clip_model.get_image_features(__a )
UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a )
UpperCamelCase = image_embeddings_clip.repeat_interleave(__a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a , ) -> List[str]:
UpperCamelCase = latents.detach().requires_grad_()
UpperCamelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase = torch.sqrt(__a )
UpperCamelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __a ):
UpperCamelCase = self.scheduler.sigmas[index]
UpperCamelCase = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase = 1 / 0.18215 * sample
UpperCamelCase = self.vae.decode(__a ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = transforms.Resize(self.feature_extractor_size )(__a )
UpperCamelCase = self.normalize(__a ).to(latents.dtype )
UpperCamelCase = self.clip_model.get_image_features(__a )
UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a )
UpperCamelCase = spherical_dist_loss(__a , __a ).mean() * clip_guidance_scale
UpperCamelCase = -torch.autograd.grad(__a , __a )[0]
if isinstance(self.scheduler , __a ):
UpperCamelCase = latents.detach() + grads * (sigma**2)
UpperCamelCase = noise_pred_original
else:
UpperCamelCase = noise_pred_original - torch.sqrt(__a ) * grads
return noise_pred, latents
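    # Note on the scheduler branches above (added for clarity): for DDIM-family
    # schedulers, pred_original_sample implements x0-prediction from eq. (12) of
    # https://arxiv.org/pdf/2010.02502.pdf,
    #     x0_hat = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t)
    # and fac = sqrt(beta_prod_t) blends x0_hat with the current latents, so the CLIP
    # loss is evaluated on a partially denoised image rather than on raw noise.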
@torch.no_grad()
def __call__(self , __a , __a , __a = None , __a = None , __a = 5_12 , __a = 5_12 , __a = 0.6 , __a = 50 , __a = 7.5 , __a = 1 , __a = 0.0 , __a = 1_00 , __a = None , __a = "pil" , __a = True , __a = 0.8 , __a = 0.1 , __a = 0.1 , ) -> List[Any]:
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(__a )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(__a , torch.Generator ) and batch_size > 1:
UpperCamelCase = [generator] + [None] * (batch_size - 1)
UpperCamelCase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
UpperCamelCase = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase = ", ".join(__a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a ):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCamelCase = self.get_image_description(__a )
if style_prompt is None:
if len(__a ):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCamelCase = self.get_image_description(__a )
# get prompt text embeddings for content and style
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase = slerp(__a , __a , __a )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = text_embeddings.repeat_interleave(__a , dim=0 )
# set timesteps
UpperCamelCase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase = {}
if accepts_offset:
UpperCamelCase = 1
self.scheduler.set_timesteps(__a , **__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )  # assign back: .to() is not in-place
UpperCamelCase , UpperCamelCase = self.get_timesteps(__a , __a , self.device )
UpperCamelCase = timesteps[:1].repeat(__a )
# Preprocess image
UpperCamelCase = preprocess(__a , __a , __a )
UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a )
UpperCamelCase = preprocess(__a , __a , __a )
UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a )
UpperCamelCase = slerp(__a , __a , __a )
if clip_guidance_scale > 0:
UpperCamelCase = self.get_clip_image_embeddings(__a , __a )
UpperCamelCase = self.get_clip_image_embeddings(__a , __a )
UpperCamelCase = slerp(
__a , __a , __a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase = content_text_input.input_ids.shape[-1]
UpperCamelCase = self.tokenizer([""] , padding="max_length" , max_length=__a , return_tensors="pt" )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase = uncond_embeddings.repeat_interleave(__a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase = torch.randn(__a , generator=__a , device="cpu" , dtype=__a ).to(
self.device )
else:
UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
# check if the scheduler accepts generator
UpperCamelCase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase = generator
with self.progress_bar(total=__a ):
for i, t in enumerate(__a ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase = self.cond_fn(
__a , __a , __a , __a , __a , __a , __a , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase = 1 / 0.18215 * latents
UpperCamelCase = self.vae.decode(__a ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(__a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
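# Sketch (added for illustration): the classifier-free guidance combination used inside
# the denoising loop above, shown on toy tensors. guidance_scale > 1 pushes the noise
# prediction away from the unconditional branch and toward the text-conditioned one.
def _demo_cfg_combine():
    noise_pred_uncond = torch.zeros(1, 4)
    noise_pred_text = torch.ones(1, 4)
    guidance_scale = 7.5
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert torch.allclose(guided, torch.full((1, 4), 7.5))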
| 358 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    # Renamed from the original placeholder for readability: this class computes the
    # circular convolution of two 1-D signals via a circulant matrix.
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
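# Cross-check sketch (added for illustration): circular convolution can also be computed
# in the frequency domain, since DFT(a circ b) = DFT(a) * DFT(b). For the default
# signals above both routes give [10, 10, 6, 14].
def _demo_circular_convolution_fft():
    a = np.array([2, 1, 2, -1])
    b = np.array([1, 2, 3, 4])
    via_fft = np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real
    direct = CircularConvolution().circular_convolution()
    assert np.allclose(via_fft, direct)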
| 244 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
print('Loading config file...' )
def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any="" , __SCREAMING_SNAKE_CASE : List[Any]="." ):
lowercase_ : List[str] = []
for k, v in d.items():
lowercase_ : Dict = parent_key + sep + k if parent_key else k
if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() )
else:
items.append((new_key, v) )
return dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = argparse.Namespace()
with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file:
try:
lowercase_ : str = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader )
lowercase_ : List[Any] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE )
for k, v in flat_cfg.items():
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) )
return config
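# Standalone restatement (added for illustration; not the original nested helper): the
# flattening step turns a nested YAML dict into dotted keys, so a value such as
# {'model': {'classification': {'name': 'mobilevit_v2'}}} can be stored on an
# argparse.Namespace and later read back with getattr(config, 'model.classification.name').
def _demo_flatten(d, parent_key='', sep='.'):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict):
            items.extend(_demo_flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)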
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : int = MobileViTVaConfig()
lowercase_ : List[str] = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowercase_ : List[Any] = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : str = 3_84
else:
lowercase_ : Dict = 2_56
lowercase_ : int = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowercase_ : int = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : Optional[Any] = 3_84
else:
lowercase_ : Tuple = 2_56
lowercase_ : List[str] = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowercase_ : int = 1_51
lowercase_ : Optional[Any] = 5_12
lowercase_ : str = 'ade20k-id2label.json'
lowercase_ : List[Any] = True
elif task_name.startswith('voc_' ):
lowercase_ : Union[str, Any] = 21
lowercase_ : Tuple = 5_12
lowercase_ : List[str] = 'pascal-voc-id2label.json'
lowercase_ : str = True
# orig_config
lowercase_ : Optional[int] = load_orig_config_file(__SCREAMING_SNAKE_CASE )
assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
lowercase_ : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 )
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
lowercase_ : Optional[Any] = 'huggingface/label-files'
lowercase_ : List[Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : int = idalabel
lowercase_ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = val
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
if base_model:
lowercase_ : int = ''
else:
lowercase_ : str = 'mobilevitv2.'
lowercase_ : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowercase_ : Dict = k[8:]
else:
lowercase_ : Union[str, Any] = k
if ".block." in k:
lowercase_ : List[str] = k_new.replace('.block.' , '.' )
if ".conv." in k:
lowercase_ : List[Any] = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
lowercase_ : str = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
lowercase_ : Dict = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
lowercase_ : Tuple = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowercase_ : Any = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
lowercase_ : str = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
lowercase_ : Tuple = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowercase_ : Dict = [0, 1]
elif i == 4:
lowercase_ : int = [0, 1, 2, 3]
elif i == 5:
lowercase_ : List[str] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
lowercase_ : List[str] = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowercase_ : int = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
lowercase_ : str = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowercase_ : Any = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
lowercase_ : List[str] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowercase_ : int = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowercase_ : str = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
lowercase_ : Union[str, Any] = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
lowercase_ : Optional[int] = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
lowercase_ : Dict = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
lowercase_ : Dict = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
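# Sketch (added for illustration): the (source, destination) pairs returned above are
# consumed by popping each tensor out of the checkpoint and re-inserting it under the
# Hugging Face name. The key below traces one mapping produced by the rules above.
def _demo_apply_rename():
    state_dict = {'conv_1.block.conv.weight': 'tensor'}
    src, dest = 'conv_1.block.conv.weight', 'mobilevitv2.conv_stem.convolution.weight'
    state_dict[dest] = state_dict.pop(src)
    assert dest in state_dict and src not in state_dict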
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : str = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__SCREAMING_SNAKE_CASE )
for k in keys_to_ignore:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( ):
lowercase_ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Tuple = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load original state_dict
lowercase_ : Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowercase_ : Tuple = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : Optional[int] = False
else:
lowercase_ : Any = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : int = False
    # remove and rename some keys of the loaded original model
lowercase_ : Any = checkpoint
remove_unused_keys(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load modified state_dict
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase_ : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase_ : Any = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify classification model
if task_name.startswith('imagenet' ):
lowercase_ : List[str] = outputs.logits
lowercase_ : int = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 213 | """simple docstring"""
def circle_sort(collection: list):
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
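# Sanity check (added for illustration): circle sort should agree with Python's
# built-in sorted() on arbitrary input, including duplicates and negative numbers.
def _demo_circle_sort():
    data = [5, -1, 3, 3, 0, 8, -7]
    assert circle_sort(list(data)) == sorted(data)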
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 213 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "autoformer"
UpperCAmelCase_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__(self , __a = None , __a = None , __a = "student_t" , __a = "nll" , __a = 1 , __a = [1, 2, 3, 4, 5, 6, 7] , __a = True , __a = 0 , __a = 0 , __a = 0 , __a = 0 , __a = None , __a = None , __a = 64 , __a = 2 , __a = 2 , __a = 2 , __a = 2 , __a = 32 , __a = 32 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 1_00 , __a = 0.02 , __a = True , __a=True , __a = 10 , __a = 25 , __a = 3 , **__a , ) -> Union[str, Any]:
# time series specific configuration
UpperCamelCase = prediction_length
UpperCamelCase = context_length if context_length is not None else prediction_length
UpperCamelCase = distribution_output
UpperCamelCase = loss
UpperCamelCase = input_size
UpperCamelCase = num_time_features
UpperCamelCase = lags_sequence
UpperCamelCase = scaling
UpperCamelCase = num_dynamic_real_features
UpperCamelCase = num_static_real_features
UpperCamelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCamelCase = cardinality
else:
UpperCamelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCamelCase = embedding_dimension
else:
UpperCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCamelCase = d_model
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_attention_heads
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = decoder_layers
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = use_cache
# Autoformer
UpperCamelCase = label_length
UpperCamelCase = moving_average
UpperCamelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=__a , **__a )
@property
def snake_case_ (self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
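    # Worked example (added for clarity): with the defaults above (input_size=1, a
    # lags_sequence of length 7, and no time, static, or dynamic features), the model's
    # feature_size is 1 * 7 + (0 + 0 + 0 + 1 * 2) = 9: the seven lagged values plus
    # the log1p(abs(loc)) and log(scale) scaling features.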
| 244 |
"""simple docstring"""
import math
def jump_search(arr, x):
    """simple docstring"""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
lowerCAmelCase__ = int(input('''Enter the number to be searched:\n'''))
lowerCAmelCase__ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
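# Complexity note and sanity check (added for illustration): jump search probes every
# sqrt(n)-th element and then scans linearly within a single block, so it runs in
# O(sqrt(n)) on a sorted array, sitting between linear and binary search.
def _demo_jump_search():
    data = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    assert jump_search(data, 21) == 8
    assert jump_search(data, 4) == -1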
| 244 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str=3 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : str=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=False , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=9_9 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=3_7 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Dict=5_1_2 , __UpperCamelCase : Union[str, Any]=1_6 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=4 , __UpperCamelCase : List[Any]=None , )->List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowercase__ ( self : List[str] )->Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any )->Optional[int]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int )->List[str]:
_UpperCAmelCase = FalconModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_UpperCAmelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , )->str:
_UpperCAmelCase = True
_UpperCAmelCase = FalconModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[str] , )->Optional[Any]:
_UpperCAmelCase = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , )->str:
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
_UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def lowercase__ ( self : Any )->Tuple:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
_UpperCAmelCase = FalconModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def lowercase__ ( self : str )->Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase , *_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_UpperCAmelCase = alibi
self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase )
def lowercase__ ( self : Dict )->int:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = '''single_label_classification'''
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = FalconForCausalLM(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_UpperCAmelCase = input_ids.shape[0]
_UpperCAmelCase = model._convert_to_rw_cache(result.past_key_values )
_UpperCAmelCase = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase )
for layer in range(len(__UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = '''multi_label_classification'''
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] )->int:
for model_class in self.all_generative_model_classes:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__UpperCAmelCase , '''use_cache''' ):
return
_UpperCAmelCase = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
if "use_cache" not in inputs:
_UpperCAmelCase = True
_UpperCAmelCase = model(**__UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_UpperCAmelCase = (
getattr(__UpperCAmelCase , '''decoder_layers''' , __UpperCAmelCase )
or getattr(__UpperCAmelCase , '''num_decoder_layers''' , __UpperCAmelCase )
or config.num_hidden_layers
)
_UpperCAmelCase = getattr(__UpperCAmelCase , '''num_kv_heads''' , config.num_attention_heads )
_UpperCAmelCase = getattr(__UpperCAmelCase , '''d_model''' , config.hidden_size )
_UpperCAmelCase = embed_dim // num_attention_heads
_UpperCAmelCase = outputs['''past_key_values''']
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = inputs['''input_ids'''].shape
for i in range(__UpperCAmelCase ):
if config.new_decoder_architecture:
_UpperCAmelCase = config.num_attention_heads
elif config.multi_query:
_UpperCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_UpperCAmelCase = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
_UpperCAmelCase = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=1_9 )
_UpperCAmelCase = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] )->List[str]:
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self : Tuple )->Dict:
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(device=__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# Test results are the same with and without cache
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=2_0 , use_cache=__UpperCAmelCase )
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=2_0 , use_cache=__UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
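# Sketch (added for illustration; mirrors the branching in the cache-shape test above):
# the number of key/value heads a Falcon cache is expected to carry depends on the
# attention variant configured on the model.
def _expected_kv_heads(config):
    if config.new_decoder_architecture:
        return config.num_attention_heads  # new architecture: one KV head per query head
    if config.multi_query:
        return 1  # multi-query attention: a single KV head shared by all query heads
    return getattr(config, 'num_kv_heads', config.num_attention_heads)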
| 260 |
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
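# Property check (added for illustration): consecutive Gray codes differ in exactly one
# bit, which is the defining property of the reflected binary code built above.
def _demo_gray_code_property(bit_count=4):
    seq = gray_code(bit_count)
    for a, b in zip(seq, seq[1:]):
        assert bin(a ^ b).count('1') == 1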
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
snake_case_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class lowercase__ :
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@dataclass
class lowercase__ :
lowercase__ = 42
lowercase__ = 42
lowercase__ = None
lowercase__ = None
class lowercase__ ( lowercase ):
lowercase__ = """train"""
lowercase__ = """dev"""
lowercase__ = """test"""
class lowercase__ :
@staticmethod
def UpperCamelCase_ ( lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[Split, str] ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCamelCase_ ( lowerCamelCase__ : str ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCamelCase_ ( lowerCamelCase__ : List[InputExample] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Dict="[CLS]" ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[str]="[SEP]" ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : int=0 ,lowerCamelCase__ : List[Any]=0 ,lowerCamelCase__ : int=-100 ,lowerCamelCase__ : List[str]=0 ,lowerCamelCase__ : Union[str, Any]=True ,):
'''simple docstring'''
_UpperCamelCase : str = {label: i for i, label in enumerate(lowerCamelCase__ )}
_UpperCamelCase : int = []
for ex_index, example in enumerate(lowerCamelCase__ ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' ,lowerCamelCase__ ,len(lowerCamelCase__ ) )
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : int = []
for word, label in zip(example.words ,example.labels ):
_UpperCamelCase : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(lowerCamelCase__ ) > 0:
tokens.extend(lowerCamelCase__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowerCamelCase__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add()
if len(lowerCamelCase__ ) > max_seq_length - special_tokens_count:
_UpperCamelCase : List[str] = tokens[: (max_seq_length - special_tokens_count)]
_UpperCamelCase : str = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCamelCase : str = [sequence_a_segment_id] * len(lowerCamelCase__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCamelCase : int = [cls_token] + tokens
_UpperCamelCase : Optional[Any] = [pad_token_label_id] + label_ids
_UpperCamelCase : Tuple = [cls_token_segment_id] + segment_ids
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCamelCase : str = [1 if mask_padding_with_zero else 0] * len(lowerCamelCase__ )
# Zero-pad up to the sequence length.
_UpperCamelCase : int = max_seq_length - len(lowerCamelCase__ )
if pad_on_left:
_UpperCamelCase : Dict = ([pad_token] * padding_length) + input_ids
_UpperCamelCase : Tuple = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCamelCase : List[str] = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCamelCase : List[Any] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(lowerCamelCase__ ) == max_seq_length
assert len(lowerCamelCase__ ) == max_seq_length
assert len(lowerCamelCase__ ) == max_seq_length
assert len(lowerCamelCase__ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' ,example.guid )
logger.info('tokens: %s' ,' '.join([str(lowerCamelCase__ ) for x in tokens] ) )
logger.info('input_ids: %s' ,' '.join([str(lowerCamelCase__ ) for x in input_ids] ) )
logger.info('input_mask: %s' ,' '.join([str(lowerCamelCase__ ) for x in input_mask] ) )
logger.info('segment_ids: %s' ,' '.join([str(lowerCamelCase__ ) for x in segment_ids] ) )
logger.info('label_ids: %s' ,' '.join([str(lowerCamelCase__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCamelCase : List[Any] = None
features.append(
InputFeatures(
input_ids=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,label_ids=lowerCamelCase__ ) )
return features
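# Minimal sketch (added for illustration; not the original helper): the label-alignment
# rule applied above. Only the first sub-token of each word keeps the real label id;
# continuation sub-tokens receive pad_token_label_id so the loss function ignores them.
def _demo_align_labels():
    word_tokens = {'Hugging': ['Hug', '##ging'], 'Face': ['Face']}
    label_map = {'B-ORG': 0, 'I-ORG': 1}
    pad_token_label_id = -100
    label_ids = []
    for word, label in [('Hugging', 'B-ORG'), ('Face', 'I-ORG')]:
        pieces = word_tokens[word]
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))
    assert label_ids == [0, -100, 1]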
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase__ ( lowercase ):
lowercase__ = 42
lowercase__ = nn.CrossEntropyLoss().ignore_index
def __init__( self : Union[str, Any] ,lowerCamelCase__ : TokenClassificationTask ,lowerCamelCase__ : str ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Split = Split.train ,):
'''simple docstring'''
# Load data features from cache or dataset file
_UpperCamelCase : Tuple = os.path.join(
lowerCamelCase__ ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(lowerCamelCase__ ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCamelCase : Any = cached_features_file + '.lock'
with FileLock(lowerCamelCase__ ):
if os.path.exists(lowerCamelCase__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
_UpperCamelCase : Union[str, Any] = torch.load(lowerCamelCase__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
_UpperCamelCase : Optional[int] = token_classification_task.read_examples_from_file(lowerCamelCase__ ,lowerCamelCase__ )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCamelCase : List[Any] = token_classification_task.convert_examples_to_features(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=lowerCamelCase__ ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features ,lowerCamelCase__ )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use -100 as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 236 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
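    # e.g. 'scratch.refinenet4.out_conv' ends up as 'neck.fusion_stage.layers.0.projection'
    # after the scratch->neck, refinenet4->fusion_stage.layers.0 and out_conv->projection steps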
if "out_conv" in name:
_UpperCamelCase : Dict = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_UpperCamelCase : int = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_UpperCamelCase : Dict = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCamelCase : str = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_UpperCamelCase : Dict = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_UpperCamelCase : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_UpperCamelCase : Union[str, Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
_UpperCamelCase : Dict = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_UpperCamelCase : str = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_UpperCamelCase : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_UpperCamelCase : List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_UpperCamelCase : Dict = name.replace('..' , '.' )
if "stem.conv" in name:
_UpperCamelCase : Tuple = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_UpperCamelCase : Optional[int] = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_UpperCamelCase : List[str] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_UpperCamelCase : Union[str, Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_UpperCamelCase : Dict = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_UpperCamelCase : str = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_UpperCamelCase : Tuple = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
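# e.g. with hidden_size=768 the fused qkv weight has shape (2304, 768); the three
# consecutive (768, 768) slices above become the query, key and value projections.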
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')

    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False)
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
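# Each line of the ref file is a JSON list with the positions of sub-word pieces
# that continue a word, e.g. [2, 3]; the whole-word-mask collator reads this
# "chinese_ref" column so that all pieces of a word are masked together.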
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
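    # For example, if "philammon" is tokenized into ["phil", "##am", "##mon"], the
    # whole-word-mask collator masks all three sub-tokens together instead of
    # sampling each sub-token independently.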
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
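        # e.g. an eval loss of 2.0 corresponds to a perplexity of e**2, about 7.39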
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
 | 342 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
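# Usage sketch (illustrative, not part of the original module):
#   def flatten(data: NestedDataStructureLike[int]) -> ListLike[int]: ...
#   def read_bytes(path: PathLike) -> bytes: ...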
| 348 | 0 |
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
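# Example invocation (script name and paths are illustrative, not from the original):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt-1000000 \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch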
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 123 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False)
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 123 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
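# debug_launcher spawns the target function in a handful of CPU processes, so these
# tests exercise the same code path as `accelerate launch` without requiring GPUs
# (behavior per accelerate's test utilities).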
 | 315 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
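    # Note: the jitted and eager outputs above are only compared shape-for-shape;
    # exact numerical equality is not asserted.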
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 343 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
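    # The cached (past_key_values) forward pass must reproduce the full-sequence
    # pass: only the slice for the newly appended tokens is compared, within 1e-3.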
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def UpperCamelCase_ ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Any ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase_ ( self : Dict ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase_ ( self : Optional[Any] ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case__ )
def UpperCamelCase_ ( self : Tuple ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case__ , gradient_checkpointing=snake_case__ )
def UpperCamelCase_ ( self : int ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case__ )
def UpperCamelCase_ ( self : Any ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case__ )
def UpperCamelCase_ ( self : Any ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase_ ( self : Any ) -> Union[str, Any]:
_snake_case = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(snake_case__ )
_snake_case = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_snake_case = '''left'''
# Define PAD Token = EOS Token = 50256
_snake_case = tokenizer.eos_token
_snake_case = model.config.eos_token_id
# use different length sentences to test batching
_snake_case = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_snake_case = tokenizer(snake_case__ , return_tensors='''pt''' , padding=snake_case__ )
_snake_case = inputs['''input_ids'''].to(snake_case__ )
_snake_case = model.generate(
input_ids=snake_case__ , attention_mask=inputs['''attention_mask'''].to(snake_case__ ) , )
_snake_case = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(snake_case__ )
_snake_case = model.generate(input_ids=snake_case__ )
_snake_case = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_snake_case = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(snake_case__ )
_snake_case = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
_snake_case = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
_snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
_snake_case = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCamelCase_ ( self : List[Any] ) -> int:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = BioGptModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase_ ( self : Union[str, Any] ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(snake_case__ )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_snake_case = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : Optional[int] ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = '''multi_label_classification'''
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(snake_case__ )
_snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case = BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_snake_case = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowercase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : str ) -> List[Any]:
_snake_case = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
_snake_case = torch.tensor([[2, 4805, 9, 656, 21]] )
_snake_case = model(snake_case__ )[0]
_snake_case = 42384
_snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case__ )
_snake_case = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ) -> Dict:
_snake_case = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_snake_case = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(snake_case__ )
torch.manual_seed(0 )
_snake_case = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(snake_case__ )
_snake_case = model.generate(
**snake_case__ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case__ , )
_snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
_snake_case = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(snake_case__ , snake_case__ )
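
# --- Editor's sketch (not part of the original test suite) --------------------
# A standalone illustration of the cache-consistency property asserted above:
# a full forward pass must agree with an incremental pass that reuses
# `past_key_values`. Assumes the `microsoft/biogpt` checkpoint can be
# downloaded.
#
#   import torch
#   from transformers import BioGptModel, BioGptTokenizer
#
#   model = BioGptModel.from_pretrained("microsoft/biogpt").eval()
#   tok = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   ids = tok("COVID-19 is", return_tensors="pt").input_ids
#   with torch.no_grad():
#       full = model(ids).last_hidden_state
#       past = model(ids[:, :-1], use_cache=True).past_key_values
#       step = model(ids[:, -1:], past_key_values=past).last_hidden_state
#   assert torch.allclose(full[:, -1:], step, atol=1e-3)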
| 362 |
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and flatten its nested parameter dict."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X keys to their transformers equivalents and convert arrays to torch."""
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X Pix2Struct checkpoint and save it in transformers format."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1_536, d_ff=3_968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1_536, d_ff=3_968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4_096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Treat the checkpoint as a VQA model.")

    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
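
# --- Editor's usage sketch (not part of the original script) -------------------
# Paths below are placeholders you must supply:
#
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted
#
# The dump folder can then be loaded back through the standard API:
#
#   from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
#   model = Pix2StructForConditionalGeneration.from_pretrained("./pix2struct-converted")
#   processor = Pix2StructProcessor.from_pretrained("./pix2struct-converted")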
| 278 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
| 280 |
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
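
# Editor's note (illustrative, not from the original file): each record in
# `biencoder-nq-dev.json` that this script reads is shaped roughly like
#
#   {
#       "question": "who sings does he love me with reba",
#       "positive_ctxs": [{"title": "Does He Love You", "text": "..."}, ...],
#       ...
#   }
#
# Only `question` and the `title` of each positive context are used above.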
| 280 | 1 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
A: int = HUGGINGFACE_HUB_CACHE
A: Optional[Any] = "config.json"
A: Union[str, Any] = "diffusion_pytorch_model.bin"
A: List[str] = "diffusion_flax_model.msgpack"
A: List[Any] = "model.onnx"
A: int = "diffusion_pytorch_model.safetensors"
A: List[str] = "weights.pb"
A: List[str] = "https://huggingface.co"
A: str = default_cache_path
A: List[str] = "diffusers_modules"
A: Any = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
A: Dict = ["fp16", "non-ema"]
A: Any = ".self_attn"
| 76 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 76 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _A (__a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _A (__a , __a , __a , __a , __a=True ) -> Any:
"""simple docstring"""
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def _A (__a , __a=False ) -> Any:
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE_ : Any = RegressionModel()
SCREAMING_SNAKE_CASE_ : List[str] = deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = DataLoader(UpperCamelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ : Optional[int] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = LambdaLR(UpperCamelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
        SCREAMING_SNAKE_CASE_ : Any = LambdaLR(UpperCamelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A (__a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE_ : str = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : int = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def _A (__a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE_ : Optional[int] = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : Tuple = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def _A (__a=False , __a=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ : Dict = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : str = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def _A (__a=False , __a=False ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ : Tuple = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : str = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
SCREAMING_SNAKE_CASE_ : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_ : Any = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
SCREAMING_SNAKE_CASE_ : str = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def _A (__a ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
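
# Editor's note: this script is designed to run under the accelerate launcher so
# that the distributed branches above are actually exercised, e.g. (illustrative
# file name and process count):
#
#   accelerate launch --num_processes 2 test_sync.py
#
# A plain `python test_sync.py` only covers the DistributedType.NO code paths.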
| 91 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
def lowerCamelCase_ ():
_UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423]
_UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
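
# Editor's worked example for the scores used in main() (height = 3):
#   depth 2 (maximizer): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (minimizer): min(90, 33)=33 on the left and min(65, 34423)=65 on the right
#   depth 0 (maximizer): max(33, 65)=65, so "Optimal value : 65" is printed.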
| 263 | 0 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(a ):
for j in range(a ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def _lowerCamelCase( a , a ):
__a = [[float("inf" ) for _ in range(a )] for _ in range(a )]
for i in range(a ):
for j in range(a ):
__a = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a ):
# looping through rows of graph array
for i in range(a ):
# looping through columns of graph array
for j in range(a ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__a = dist[i][k] + dist[k][j]
_print_dist(a , a )
return dist, v
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = int(input("""Enter number of vertices: """))
SCREAMING_SNAKE_CASE__:Union[str, Any] = int(input("""Enter number of edges: """))
SCREAMING_SNAKE_CASE__:int = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
SCREAMING_SNAKE_CASE__:int = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
SCREAMING_SNAKE_CASE__:Optional[int] = int(input("""Enter source:"""))
SCREAMING_SNAKE_CASE__:str = int(input("""Enter destination:"""))
SCREAMING_SNAKE_CASE__:List[str] = float(input("""Enter weight:"""))
SCREAMING_SNAKE_CASE__:Dict = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
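
# --- Editor's sketch: a non-interactive check (not in the original file) ------
# Edges 0->1 (weight 2) and 1->2 (weight 4) should yield the transitive
# shortest path 0->2 with total weight 6. Call _demo_floyd_warshall() to run
# it; note that floyd_warshall also prints the distance matrix as a side effect.
def _demo_floyd_warshall():
    inf = float("inf")
    g = [[0.0, 2.0, inf], [inf, 0.0, 4.0], [inf, inf, 0.0]]
    dist, _ = floyd_warshall(g, 3)
    assert dist[0][2] == 6.0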
| 268 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
SCREAMING_SNAKE_CASE__:Optional[int] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
SCREAMING_SNAKE_CASE__:Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
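
# Editor's usage sketch (illustrative): rank candidate plaintexts of a
# substitution cipher by how English-like their letter frequencies are.
#
#   candidates = ["Gsv jfrxp yildm ulc", "The quick brown fox"]
#   best = max(candidates, key=english_freq_match_score)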
| 268 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCAmelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__lowerCAmelCase = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode characters,
    as used by the GPT-2/BART byte-level BPE.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __a ( lowerCAmelCase_ ):
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token
lowercase__: str = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token
lowercase__: List[str] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token
lowercase__: Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token
lowercase__: List[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token
lowercase__: Optional[int] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , )
with open(__snake_case , encoding='utf-8' ) as vocab_handle:
lowercase__: Dict = json.load(__snake_case )
lowercase__: List[str] = {v: k for k, v in self.encoder.items()}
lowercase__: Any = errors # how to handle errors in decoding
lowercase__: Union[str, Any] = bytes_to_unicode()
lowercase__: Any = {v: k for k, v in self.byte_encoder.items()}
with open(__snake_case , encoding='utf-8' ) as merges_handle:
lowercase__: int = merges_handle.read().split('\n' )[1:-1]
lowercase__: List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__: int = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowercase__: Dict = {}
lowercase__: int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__: Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__: Optional[Any] = tuple(__snake_case )
lowercase__: Union[str, Any] = get_pairs(__snake_case )
if not pairs:
return token
while True:
            lowercase__: Optional[Any] = min(__snake_case , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__: str = bigram
lowercase__: Optional[Any] = []
lowercase__: Dict = 0
while i < len(__snake_case ):
try:
lowercase__: Dict = word.index(__snake_case , __snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__: List[Any] = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__: List[str] = tuple(__snake_case )
lowercase__: Tuple = new_word
if len(__snake_case ) == 1:
break
else:
lowercase__: str = get_pairs(__snake_case )
lowercase__: Tuple = ' '.join(__snake_case )
lowercase__: Optional[int] = word
return word
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: List[Any] = []
for token in re.findall(self.pat , __snake_case ):
lowercase__: Tuple = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(' ' ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
return self.decoder.get(__snake_case )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: List[str] = ''.join(__snake_case )
lowercase__: str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: Union[str, Any] = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__: Optional[int] = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '\n' )
lowercase__: Tuple = 0
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
lowercase__: List[Any] = token_index
writer.write(' '.join(__snake_case ) + '\n' )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: Union[str, Any] = [self.cls_token_id]
lowercase__: int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: str = [self.sep_token_id]
lowercase__: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()):
lowercase__: Union[str, Any] = ' ' + text
return (text, kwargs)
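
# --- Editor's sketch (not part of the original file) ---------------------------
# Typical use of this slow BPE tokenizer through the Hub checkpoints listed in
# PRETRAINED_VOCAB_FILES_MAP above (illustrative):
#
#   from transformers import BartTokenizer
#
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   enc = tok("Hello world")                       # adds <s> ... </s> around the BPE tokens
#   tok.convert_ids_to_tokens(enc["input_ids"])    # inspect the byte-level BPE pieces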
| 196 |
_lowerCAmelCase : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 218 | 0 |
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
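
# --- Editor's sketch (not part of the original file) ---------------------------
# Minimal end-to-end run, assuming the `google/ddpm-cifar10-32` checkpoint can
# be downloaded; eta=0.0 makes the DDIM sampler fully deterministic. The module
# above uses relative imports, so run the sketch from application code:
#
#   from diffusers import DDIMPipeline
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")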
| 165 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(
__lowerCAmelCase , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
if self.framework == "tf":
UpperCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCamelCase_ )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
UpperCamelCase = self.get_masked_index(lowerCamelCase_ )
UpperCamelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any]=None , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
if return_tensors is None:
UpperCamelCase = self.framework
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.ensure_exactly_one_mask_token(lowerCamelCase_ )
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = self.model(**lowerCamelCase_ )
UpperCamelCase = model_inputs["""input_ids"""]
return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
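# Hedged usage sketch (checkpoint name illustrative): the pipeline returns the top_k
# most likely fills for each mask token, with a score, token id and decoded sequence.
#
#     from transformers import pipeline
#     fill_mask = pipeline("fill-mask", model="distilroberta-base")
#     print(fill_mask("Paris is the <mask> of France.", top_k=3))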
| 165 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class _lowerCAmelCase ( unittest.TestCase ):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
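# Hedged usage sketch outside the test harness (network access assumed on first call):
#
#     from transformers.utils import cached_file
#     local_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#     # -> .../models--hf-internal-testing--tiny-random-bert/snapshots/<commit>/config.json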
| 203 |
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling, so that entangled data
    # sources (e.g. files and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
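# A quick demonstration of the helpers above: 10 shards over 3 jobs split as 4/3/3,
# and non-list values are broadcast unchanged to every job.
if __name__ == "__main__":
    gen_kwargs = {"files": [f"data-{i}.txt" for i in range(10)], "split": "train"}
    print(_distribute_shards(num_shards=10, max_num_jobs=3))  # [range(0, 4), range(4, 7), range(7, 10)]
    for job_kwargs in _split_gen_kwargs(gen_kwargs, max_num_jobs=3):
        print(len(job_kwargs["files"]), job_kwargs["split"])  # 4 train / 3 train / 3 train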
| 203 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
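# Small self-contained check of the fused-QKV split above: a (3*h, h) projection
# splits into three (h, h) blocks for query, key and value.
#
#     h = 4
#     qkv = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
#     q, k, v = qkv[:h, :], qkv[h : 2 * h, :], qkv[-h:, :]
#     assert q.shape == k.shape == v.shape == (h, h)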
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCAmelCase : Optional[Any] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
__lowerCAmelCase : Dict = Image.open(requests.get(__A , stream=__A ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__UpperCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
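# Example invocation (the script filename is an assumption; network access required):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten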
| 139 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
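# Standalone illustration of the relative-tolerance check above (hypothetical helper,
# not part of the test suite): bound the ratio expected/actual near 1 instead of the
# absolute difference, which is meaningless when magnitudes span ~10e0 to 10e8.
#
#     def within_relative_tolerance(expected, actual, tol=1e-3):
#         ratio = expected / actual
#         return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))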
| 139 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
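# Why document "1" keeps winning in the retrieve tests above: with an all-ones query q,
# the inner products are <q, ones(d)> = d and <q, 2 * ones(d)> = 2d, so the second
# document always scores highest under METRIC_INNER_PRODUCT.
#
#     q = np.ones(8); docs = np.stack([np.ones(8), 2 * np.ones(8)])
#     print(docs @ q)  # [ 8. 16.] -> index 1 has the max inner product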
| 213 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, computed with a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 54 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
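# Hedged usage sketch (URLs illustrative): checksum dicts map url -> info dict, and any
# mismatch raises NonMatchingChecksumError.
#
#     expected = {"https://example.com/data.txt": {"num_bytes": 12, "checksum": "abc"}}
#     recorded = {"https://example.com/data.txt": {"num_bytes": 12, "checksum": "abc"}}
#     verify_checksums(expected, recorded)  # logs "All the checksums matched successfully"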
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file, reading in 1 MiB chunks."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 371 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # out channels is double the in channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
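

# Editorial note: Kandinsky 2.2 is a two-stage system. The prior pipeline maps the
# text prompt to CLIP image embeddings (and negative embeddings), and the decoder
# pipeline tested above turns those embeddings into the final image.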
"""Tests for the activation functions in `transformers.activations`."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
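
    # Editorial illustration: "gelu_10" behaves like GELU but clips its output at
    # 10.0, so large positive inputs saturate:
    #   get_activation("gelu_10")(torch.tensor([100.0]))  # -> tensor([10.])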
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # the attribute set on act1 must not leak onto a freshly created activation
        with self.assertRaises(AttributeError):
            _ = act2.a
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim,
                num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embeddings are the noise-augmented image embeddings concatenated
            # with the timestep embeddings, hence the factor of 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,  # reconstructed; the original boolean was lost in extraction
            use_linear_projection=True,  # reconstructed; the original boolean was lost in extraction
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        # only check the max difference on deterministic devices
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        # only check the max difference on deterministic devices
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
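

# Editorial note: `enable_attention_slicing()` and `enable_sequential_cpu_offload()`
# are diffusers' standard memory-saving switches; the last test above relies on them
# to keep peak CUDA allocation under 7 GB.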
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_, solver_type=lowerCamelCase_, prediction_type=lowerCamelCase_, )
lowerCamelCase__ : str = self.full_loop(
solver_order=lowerCamelCase_, solver_type=lowerCamelCase_, prediction_type=lowerCamelCase_, )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def a__ (self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
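

# A minimal usage sketch (editorial; assumes a Stable-Diffusion-style pipeline is
# available locally). Schedulers that share a config are interchangeable, so UniPC
# can be dropped into an existing pipeline via `from_config`:
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)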
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
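

# A minimal usage sketch (editorial; the model id shown is an assumption):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask from the XLM-R tokenizer plus pixel_values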
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        """The first available non-padding position index should be padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
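
    # Editorial note: `create_position_ids_from_input_ids` offsets every position by
    # `padding_idx + 1` and maps padding tokens to `padding_idx` itself, matching the
    # fairseq convention so that ESM checkpoints keep their original embeddings.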
    def test_create_position_ids_from_inputs_embeds(self):
        """Padding-free inputs_embeds should get sequential position ids starting at padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # method name reconstructed; the original name was lost in extraction
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer, using a jieba-based pre-tokenizer for Chinese text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom jieba pre-tokenizer cannot be pickled, so swap in a plain BERT pre-tokenizer
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # the custom jieba pre-tokenizer cannot be serialized, so save with the plain BERT pre-tokenizer
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
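

# A minimal usage sketch (editorial): the fast tokenizer pre-tokenizes Chinese text
# with jieba and temporarily swaps back to the plain BERT pre-tokenizer when
# pickling or saving (see __getstate__ / save_pretrained above):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")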
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
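

# A minimal usage sketch for the dataclasses above (editorial): HfArgumentParser
# maps dataclass fields onto CLI flags and parses back into dataclass instances.
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )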
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument('--required_str' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCAmelCase , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCAmelCase , )
expected.add_argument('--opt' , type=__UpperCAmelCase , default=__UpperCAmelCase )
expected.add_argument('--baz' , default='toto' , type=__UpperCAmelCase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowerCAmelCase__ :List[str] = parser.parse_dict(__UpperCAmelCase )[0]
lowerCAmelCase__ :Any = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :str = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 4_2,
}
self.assertRaises(__UpperCAmelCase , parser.parse_dict , __UpperCAmelCase , allow_extra_keys=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ :List[Any] = os.path.join(__UpperCAmelCase , 'temp_json' )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
lowerCAmelCase__ :Union[str, Any] = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = HfArgumentParser(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ :Optional[int] = os.path.join(__UpperCAmelCase , 'temp_yaml' )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Dict = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowerCAmelCase__ :Any = BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = HfArgumentParser(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
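# A minimal usage sketch (not part of the original file) of the HfArgumentParser
# pattern the tests above exercise; the DemoArgs dataclass is hypothetical,
# not one of the test fixtures.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    foo: int = 12
    bar: float = 3.14
    baz: str = field(default="42", metadata={"help": "help message"})
    flag: bool = False

demo_parser = HfArgumentParser(DemoArgs)
(demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "7", "--flag", "True"])
assert demo_args.foo == 7 and demo_args.flag is True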
| 353 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = parent
lowerCAmelCase__ :int = batch_size
lowerCAmelCase__ :List[str] = seq_length
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Tuple = use_input_mask
lowerCAmelCase__ :Dict = use_token_type_ids
lowerCAmelCase__ :Union[str, Any] = use_labels
lowerCAmelCase__ :Tuple = vocab_size
lowerCAmelCase__ :List[Any] = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :List[str] = intermediate_size
lowerCAmelCase__ :Optional[Any] = hidden_act
lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ :Any = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = max_position_embeddings
lowerCAmelCase__ :Tuple = type_vocab_size
lowerCAmelCase__ :List[str] = type_sequence_label_size
lowerCAmelCase__ :Tuple = initializer_range
lowerCAmelCase__ :Optional[Any] = num_labels
lowerCAmelCase__ :int = num_choices
lowerCAmelCase__ :Union[str, Any] = relative_attention
lowerCAmelCase__ :int = position_biased_input
lowerCAmelCase__ :Optional[int] = pos_att_type
lowerCAmelCase__ :Dict = scope
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :int = None
if self.use_input_mask:
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase__ :Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Dict = None
if self.use_labels:
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ :Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.get_config()
lowerCAmelCase__ :Optional[Any] = 3_0_0
return config
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = DebertaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = DebertaForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.num_labels
lowerCAmelCase__ :int = DebertaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.num_labels
lowerCAmelCase__ :Any = DebertaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = DebertaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = lowerCAmelCase__
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ :Optional[Any] = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ :Tuple = True
__magic_name__ :List[Any] = False
__magic_name__ :Optional[Any] = False
__magic_name__ :str = False
__magic_name__ :int = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = DebertaModelTester(self )
lowerCAmelCase__ :List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :int = DebertaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def snake_case ( self ):
'''simple docstring'''
pass
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = DebertaModel.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase__ :str = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase__ :Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ :int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ :str = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 254 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =dataset
lowerCamelCase__: List[str] =process
lowerCamelCase__: Any =params
def __len__(self : Optional[int]) ->Any:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Any , UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.dataset[i]
lowerCamelCase__: int =self.process(UpperCAmelCase_ , **self.params)
return processed
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]=None) ->Dict:
'''simple docstring'''
lowerCamelCase__: int =loader
lowerCamelCase__: int =infer
lowerCamelCase__: List[str] =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCamelCase__: List[Any] =None
lowerCamelCase__: Tuple =loader_batch_size
# Internal bookkeeping
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: List[Any] =None
def __len__(self : Dict) ->Any:
'''simple docstring'''
return len(self.loader)
def __iter__(self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =iter(self.loader)
return self
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is a simple tensor, so just fetch the slice
lowerCamelCase__: Any =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCamelCase__: Optional[int] ={}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Convert ModelOutput to tuple first
lowerCamelCase__: int =element.to_tuple()
if isinstance(element[0] , torch.Tensor):
lowerCamelCase__: Dict =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCamelCase__: Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor):
lowerCamelCase__: Any =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCamelCase__: Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
lowerCamelCase__: List[str] =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__: Optional[int] =element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__: Dict =np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCamelCase__: List[str] =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCamelCase__: Optional[int] =self._loader_batch_data.__class__(UpperCAmelCase_)
self._loader_batch_index += 1
return result
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCamelCase__: int =next(self.iterator)
lowerCamelCase__: List[str] =self.infer(UpperCAmelCase_ , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase_ , torch.Tensor):
lowerCamelCase__: List[Any] =processed
else:
lowerCamelCase__: Optional[int] =list(processed.keys())[0]
lowerCamelCase__: Dict =processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Optional[int] =len(UpperCAmelCase_)
else:
lowerCamelCase__: str =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__: str =observed_batch_size
# Setting internal index to unwrap the batch
lowerCamelCase__: List[Any] =processed
lowerCamelCase__: List[Any] =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
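# Toy illustration (not part of the original file; shapes invented) of the
# unrolling idea the iterator above implements: a batched model output is
# sliced per item, and each slice is re-expanded so downstream code still
# sees batch_size=1 tensors.
import torch

batch = {"logits": torch.zeros(4, 7)}  # one forward pass over 4 items
item0 = {k: v[0].unsqueeze(0) for k, v in batch.items()}
assert item0["logits"].shape == (1, 7)  # looks like batch_size=1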
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=None) ->Dict:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __iter__(self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =iter(self.loader)
lowerCamelCase__: int =None
return self
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
if self.subiterator is None:
lowerCamelCase__: Dict =self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
lowerCamelCase__: Any =next(self.subiterator)
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item;
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowerCamelCase__: Optional[int] =self.infer(next(self.iterator) , **self.params)
lowerCamelCase__: int =next(self.subiterator)
return processed
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __iter__(self : Dict) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =iter(self.loader)
return self
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =False
lowerCamelCase__: str =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowerCamelCase__: List[str] =self.loader_batch_item()
lowerCamelCase__: Union[str, Any] =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
if is_last:
return accumulator
while not is_last:
lowerCamelCase__: Optional[Any] =self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase_ , torch.Tensor):
lowerCamelCase__: Optional[int] =processed
else:
lowerCamelCase__: List[str] =list(processed.keys())[0]
lowerCamelCase__: Union[str, Any] =processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
else:
lowerCamelCase__: Any =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__: int =observed_batch_size
lowerCamelCase__: Union[str, Any] =processed
lowerCamelCase__: Dict =0
while self._loader_batch_index < self.loader_batch_size:
lowerCamelCase__: Dict =self.loader_batch_item()
lowerCamelCase__: Tuple =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
if is_last:
return accumulator
else:
lowerCamelCase__: int =processed
lowerCamelCase__: int =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
return accumulator
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =dataset
lowerCamelCase__: int =key
def __len__(self : List[Any]) ->List[str]:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Dict , UpperCAmelCase_ : str) ->Union[str, Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =dataset
lowerCamelCase__: Union[str, Any] =keya
lowerCamelCase__: str =keya
def __len__(self : List[str]) ->int:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Optional[Any] , UpperCAmelCase_ : Dict) ->List[Any]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCamelCase : List[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ):
    """simple docstring"""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )

    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask( shape , rng=None ):
    """simple docstring"""
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = ()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCamelCase = 2
UpperCamelCase = inputs["""input_ids"""].shape[-1] // 2
UpperCamelCase = inputs["""input_ids"""][:max_batch_size, :sequence_length]
UpperCamelCase = jnp.ones_like(lowerCamelCase_ )
UpperCamelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCamelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCamelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = pt_model_class(lowerCamelCase_ ).eval()
UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase_ , flax_model.params )
UpperCamelCase = flax_model.generate(lowerCamelCase_ ).sequences
UpperCamelCase = pt_model.generate(torch.tensor(lowerCamelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = True
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 2
UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = True
UpperCamelCase = max_length
UpperCamelCase = 0.8
UpperCamelCase = 1_0
UpperCamelCase = 0.3
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = max_length
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = max_length
UpperCamelCase = 2
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = False
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = True
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = 2
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
UpperCamelCase = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCamelCase = """Hello world"""
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCamelCase_ , 'do_samples' ):
model.generate(lowerCamelCase_ , do_samples=lowerCamelCase_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCamelCase_ , 'foo' ):
UpperCamelCase = {"""foo""": """bar"""}
model.generate(lowerCamelCase_ , **lowerCamelCase_ )
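# A toy sketch (not part of the original file; the model function and shapes
# are invented) of the eager-vs-jit equivalence the tests above assert:
# jit-compiling a pure generation function must not change its outputs.
import jax.numpy as jnp
from jax import jit

def toy_generate(input_ids):
    # stand-in for model.generate(...).sequences: append 5 pad tokens
    pad = jnp.zeros((input_ids.shape[0], 5), dtype=input_ids.dtype)
    return jnp.concatenate([input_ids, pad], axis=-1)

jit_toy_generate = jit(toy_generate)
ids = jnp.ones((2, 3), dtype=jnp.int32)
assert (toy_generate(ids) == jit_toy_generate(ids)).all()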
| 365 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 0
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = CLIPConfig()
# Create a dummy config file with image_processor_type
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('image_processor_type' )
UpperCamelCase = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'clip-base is not a local folder and is not a valid model identifier' ):
UpperCamelCase = AutoImageProcessor.from_pretrained('clip-base' )
def A ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='aaaaaa' )
def A ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def A ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : Optional[int] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(UpperCamelCase__ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 249 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE__ = Lock()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__UpperCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCamelCase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCamelCase = min(__UpperCamelCase , __UpperCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__UpperCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCamelCase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCamelCase = max(__UpperCamelCase , __UpperCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__UpperCamelCase )
def lowercase__ ( __UpperCamelCase )-> Tuple:
UpperCamelCase = []
UpperCamelCase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCamelCase = Pipe()
UpperCamelCase = Pipe()
process_array_.append(
Process(
target=__UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCamelCase = temp_rs
UpperCamelCase = temp_rr
for i in range(1 , len(__UpperCamelCase ) - 1 ):
UpperCamelCase = Pipe()
UpperCamelCase = Pipe()
process_array_.append(
Process(
target=__UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCamelCase = temp_rs
UpperCamelCase = temp_rr
process_array_.append(
Process(
target=__UpperCamelCase , args=(
len(__UpperCamelCase ) - 1,
arr[len(__UpperCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__UpperCamelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__UpperCamelCase ) ):
UpperCamelCase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase__ ( )-> Any:
UpperCamelCase = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*__UpperCamelCase )
UpperCamelCase = odd_even_transposition(__UpperCamelCase )
print("""Sorted List\n""" )
print(*__UpperCamelCase )
if __name__ == "__main__":
main()
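# For reference, a sequential sketch (not in the original file) of the same
# odd-even transposition idea: the parallel version above gives each index
# its own process, while this loop simply alternates even/odd compare phases.
def odd_even_sort_sketch(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare (0,1),(2,3)...; odd phases compare (1,2),(3,4)...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_sort_sketch([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]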
| 321 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml'
def lowercase__ ( __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = defaultdict(__UpperCamelCase )
UpperCamelCase = []
UpperCamelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__UpperCamelCase )
UpperCamelCase = new_doc_list
UpperCamelCase = [key for key, value in counts.items() if value > 1]
UpperCamelCase = []
for duplicate_key in duplicates:
UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    UpperCamelCase = sorted(__UpperCamelCase , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__UpperCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__UpperCamelCase )
# Sort
return overview_doc
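# A simplified, self-contained sketch (not part of the original file) of what
# the TOC-cleaning helper above is meant to do: collapse duplicate "local"
# entries, sort the rest by title, and keep any "Overview" entry first. It
# skips the conflicting-title error handling of the full function.
def clean_doc_toc_sketch(doc_list):
    seen, overview, rest = set(), [], []
    for doc in doc_list:
        if doc["title"].lower() == "overview":
            overview.append(doc)
        elif doc["local"] not in seen:
            seen.add(doc["local"])
            rest.append(doc)
    return overview + sorted(rest, key=lambda d: d["title"].lower())

toy = [
    {"local": "b_doc", "title": "Beta"},
    {"local": "overview", "title": "Overview"},
    {"local": "a_doc", "title": "Alpha"},
    {"local": "a_doc", "title": "Alpha"},  # duplicate collapses to one entry
]
assert [d["title"] for d in clean_doc_toc_sketch(toy)] == ["Overview", "Alpha", "Beta"]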
def lowercase__ ( __UpperCamelCase=False )-> List[str]:
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
UpperCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase = content[api_idx]["""sections"""]
# Then to the model doc
UpperCamelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCamelCase = api_doc[scheduler_idx]["""sections"""]
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
UpperCamelCase = False
if new_scheduler_doc != scheduler_doc:
UpperCamelCase = True
if overwrite:
UpperCamelCase = new_scheduler_doc
if diff:
if overwrite:
UpperCamelCase = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def lowercase__ ( __UpperCamelCase=False )-> Tuple:
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
UpperCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase = content[api_idx]["""sections"""]
# Then to the model doc
UpperCamelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCamelCase = False
UpperCamelCase = api_doc[pipeline_idx]["""sections"""]
UpperCamelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCamelCase = pipeline_doc["""section"""]
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
if overwrite:
UpperCamelCase = new_sub_pipeline_doc
new_pipeline_docs.append(__UpperCamelCase )
# sort overall pipeline doc
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
if new_pipeline_docs != pipeline_docs:
UpperCamelCase = True
if overwrite:
UpperCamelCase = new_pipeline_docs
if diff:
if overwrite:
UpperCamelCase = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 321 | 1 |
class __a :
    def __init__( self , n ):
        """simple docstring"""
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__( self ) -> int:
        """simple docstring"""
        return self.size

    def is_empty( self ) -> bool:
        """simple docstring"""
        return self.size == 0

    def first( self ):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]

    def enqueue( self , data ):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue( self ):
        """simple docstring"""
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
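# A short usage sketch (not part of the original file) showing the
# wrap-around behaviour of the fixed-size ring buffer above:
q = __a(3)
q.enqueue(1).enqueue(2).enqueue(3)
assert len(q) == 3 and not q.is_empty()
assert q.dequeue() == 1          # front advances past the oldest element
q.enqueue(4)                     # rear wraps back to index 0
assert [q.dequeue() for _ in range(3)] == [2, 3, 4]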
| 363 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self , model ):
        """simple docstring"""
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    # implemented only because pytorch-lightning requires a forward method
    def forward( self ):
        """simple docstring"""
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str
):
    '''simple docstring'''
    # load the base model and wrap it in the lightning module
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )

    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
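# Example invocation (the script name and paths below are illustrative
# placeholders, not taken from the original file):
# python convert_longformer_qa_checkpoint.py \
#   --longformer_model longformer-base-4096 \
#   --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#   --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa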
| 185 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = tempfile.mkdtemp()
snake_case : int = BlipImageProcessor()
snake_case : List[str] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
snake_case : List[str] = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
snake_case : List[Any] = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case : Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
snake_case : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
snake_case : Any = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : Dict = self.get_qformer_tokenizer()
snake_case : Optional[int] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
snake_case : Union[str, Any] = self.prepare_image_inputs()
snake_case : Dict = image_processor(UpperCamelCase__ , return_tensors="np" )
snake_case : Any = processor(images=UpperCamelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : Tuple = self.get_qformer_tokenizer()
snake_case : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
snake_case : Dict = "lower newer"
snake_case : Union[str, Any] = processor(text=UpperCamelCase__ )
snake_case : Dict = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
snake_case : Optional[Any] = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : Optional[int] = self.get_qformer_tokenizer()
snake_case : Any = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
snake_case : Optional[int] = "lower newer"
snake_case : Any = self.prepare_image_inputs()
snake_case : str = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : List[str] = self.get_image_processor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[str] = self.get_qformer_tokenizer()
snake_case : Any = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
snake_case : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : str = processor.batch_decode(UpperCamelCase__ )
snake_case : int = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : str = self.get_qformer_tokenizer()
snake_case : List[Any] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
snake_case : int = "lower newer"
snake_case : Union[str, Any] = self.prepare_image_inputs()
snake_case : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 203 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
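# Editor's sketch (not from the original file): 2**p - 1 is prime exactly when
# lucas_lehmer_test(p) is True, so the known Mersenne exponents below 32 make a
# handy self-check for the implementation above.
assert [p for p in range(2, 32) if lucas_lehmer_test(p)] == [2, 3, 5, 7, 13, 17, 19, 31]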
| 203 | 1 |
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """
    Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
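# Editor's sketch: the padding logic in `_decode_audio` is easier to see on a
# tiny standalone example. All names below are illustrative, not part of the
# processor's API.
def strip_padding(audio: np.ndarray, mask: np.ndarray, padding_value: int = 0) -> list:
    # audio: (batch, channels, seq_len); mask: (batch, seq_len) with 1 marking real samples
    return [clip[:, m != padding_value] for clip, m in zip(audio, mask)]

_audio = np.arange(12, dtype=float).reshape(2, 1, 6)
_mask = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 0, 0, 0, 0]])
assert [t.shape for t in strip_padding(_audio, _mask)] == [(1, 4), (1, 2)]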
| 370 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 227 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
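# Editor's sketch: a simplified, single-level version of the tag parsing that
# `token2json` performs above, to make the regex idea concrete. The sample
# sequence is made up; real Donut outputs nest these tags.
def tiny_token2json(tokens: str) -> dict:
    return {key: content.strip() for key, content in re.findall(r"<s_(.*?)>(.*?)</s_\1>", tokens)}

assert tiny_token2json("<s_name>latte</s_name><s_price>4.50</s_price>") == {"name": "latte", "price": "4.50"}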
| 64 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
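# Editor's sketch: the q/k/v splitting above relies on timm checkpoints storing
# the three projections as one stacked (3 * hidden, hidden) matrix; slicing it
# in thirds recovers query, key and value, as this toy check shows.
def _qkv_split_demo(hidden: int = 4) -> None:
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)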
| 64 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
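# Editor's sketch: `_LazyModule` defers the heavy torch imports above until an
# attribute is first accessed. A stripped-down illustration of the mechanism
# (this is not the real class, so it is left commented out here):
#
#     import importlib
#
#     class TinyLazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._attr_to_module[attr], self._name)
#             return getattr(module, attr)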
| 322 |
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
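# Editor's sketch: quick cross-check — the perfect numbers below 10_000 are
# exactly 6, 28, 496 and 8128.
assert [n for n in range(2, 10_000) if perfect(n)] == [6, 28, 496, 8128]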
| 322 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
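# Editor's sketch: the sequence-pair layout built by the two methods above is
# "<s> A </s></s> B </s>". With made-up ids (cls=0, sep=2) the arithmetic checks out:
_cls, _sep = [0], [2]
_a, _b = [11, 12], [13]
assert _cls + _a + _sep + _sep + _b + _sep == [0, 11, 12, 2, 2, 13, 2]
assert len(_cls + _a + _sep) * [0] == [0, 0, 0, 0]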
| 178 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178 | 1 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        F"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        F"""{local_min.score()}"""
    )
| 58 |
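# Editor's sketch: the acceptance rule above is the classic Metropolis
# criterion — a worsening move of size `change` (< 0) is taken with probability
# e^(change / T), so bad moves become rarer as the temperature drops.
def acceptance_probability(change: float, temperature: float) -> float:
    return 1.0 if change > 0 else math.e ** (change / temperature)

assert acceptance_probability(5.0, 100.0) == 1.0
assert acceptance_probability(-5.0, 100.0) < acceptance_probability(-5.0, 200.0) < 1.0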
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 58 | 1 |
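# Editor's note (an assumption about this data): these lists look like
# hand-tuned denoising timestep schedules; consumers of such schedules usually
# require strictly decreasing values ending at 0, which is easy to verify:
def is_valid_schedule(timesteps: list) -> bool:
    return all(a > b for a, b in zip(timesteps, timesteps[1:])) and timesteps[-1] == 0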
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)

    with open(F'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(F'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(F'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args) | 212 | 0 |
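# Editor's sketch: the WER logged above is word-level edit distance divided by
# the reference word count. A dependency-free illustration:
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[-1][-1] / len(ref)

assert word_error_rate("the cat sat", "the cat sat") == 0.0
assert word_error_rate("the cat sat", "the cat") == 1 / 3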
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 155 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/table-transformer-detection''': (
        '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
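# Editor's note: `attribute_map` plus the two properties above make
# `hidden_size` and `num_attention_heads` read-through aliases. Inside the
# transformers package this illustrative check would hold for a default config:
#
#     config = TableTransformerConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8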
| 155 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
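# Editor's sketch: the point-wise map is just a shift — 128 + level + (c - 128)
# simplifies to c + level, with PIL clamping results to [0, 255]:
_level = 100
assert [128 + _level + (c - 128) for c in (0, 100, 200)] == [100, 200, 300]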
| 35 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
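# Editor's sketch: a brute-force cross-check. For n=1000 the only Pythagorean
# triplet is (200, 375, 425), so both searches must agree on 31875000.
def brute_force(n: int = 1000) -> int:
    return max(
        (a * b * (n - a - b) for a in range(1, n) for b in range(a, n - a) if a * a + b * b == (n - a - b) ** 2),
        default=-1,
    )

assert brute_force(1000) == solution(1000) == 31875000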
| 321 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )

    @require_torch
    def test_small_model_pt(self):
        model = '''anton-l/wav2vec2-random-tiny-classifier'''

        audio_classifier = pipeline('''audio-classification''', model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {'''score''': 0.0_842, '''label''': '''no'''},
            {'''score''': 0.0_838, '''label''': '''up'''},
            {'''score''': 0.0_837, '''label''': '''go'''},
            {'''score''': 0.0_834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0_845, '''label''': '''stop'''},
            {'''score''': 0.0_844, '''label''': '''on'''},
            {'''score''': 0.0_841, '''label''': '''right'''},
            {'''score''': 0.0_834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {'''array''': np.ones((8_000,)), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = '''superb/wav2vec2-base-superb-ks'''

        audio_classifier = pipeline('''audio-classification''', model=model)
        dataset = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''')

        audio = np.array(dataset[3]['''speech'''], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {'''score''': 0.981, '''label''': '''go'''},
                {'''score''': 0.007, '''label''': '''up'''},
                {'''score''': 0.006, '''label''': '''_unknown_'''},
                {'''score''': 0.001, '''label''': '''down'''},
            ], )

    @require_tf
    @unittest.skip('''Audio classification is not implemented for TF''')
    def test_small_model_tf(self):
        pass
| 116 |
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 116 | 1 |
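# Editor's sketch: a small grid with two 8-connected islands exercises the
# class above end to end.
_grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
assert Graph(3, 4, _grid).count_islands() == 2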
_SCREAMING_SNAKE_CASE = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_SCREAMING_SNAKE_CASE = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 327 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 327 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ ={
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ =['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ =['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''modeling_llama'''] = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A_, module_spec=__spec__)
| 361 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296 | 0 |
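# Hedged invocation sketch for the DPT conversion script above; the file name
# `convert_dpt_to_pytorch.py` is an assumption, while the flags come from its
# argparse block:
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large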
from __future__ import annotations
END = '#'

class Trie:
    def __init__(self) -> None:
        '''simple docstring'''
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)

trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)

def autocomplete_using_trie(string: str) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 306 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = 'abeja/gpt-neox-japanese-2.7b'
        prompts = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
        EXPECTED_OUTPUTS = [
            'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
            '100年後に必要とされる会社は、「人」が中心の会社です。',
            'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
            '国境の長いトンネルを抜けると、そこは雪国だった。',
            '美味しい日本食といえば、やっぱりお寿司ですよね。',
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors='pt').input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 178 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset

class MakeDuplicateClustersTest(TestCase):
    '''simple docstring'''

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
| 271 |
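# The `minhash_deduplication` module exercised above is not shown here. As a
# hedged illustration of the underlying idea, this is a minimal MinHash/LSH
# near-duplicate lookup using the `datasketch` library (an assumption; the real
# module may be implemented differently):
from datasketch import MinHash, MinHashLSH

def minhash_of(text: str, num_perm: int = 128) -> MinHash:
    # Hash each whitespace token into the MinHash signature.
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=128)
lsh.insert("doc1", minhash_of("a " * 20))
print(lsh.query(minhash_of("a " * 30)))  # ['doc1']: estimated Jaccard above threshold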
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_doc_toc(doc_list):
    '''simple docstring'''
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.')
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
def check_pipeline_doc(overwrite=False):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 271 | 1 |
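# Hedged sketch of the `_toctree.yml` shape the checker above walks; this is an
# invented minimal example, not the real file:
#
#   - title: API
#     sections:
#       - title: Schedulers
#         sections:
#           - local: api/schedulers/ddim
#             title: DDIM
#       - title: Pipelines
#         sections:
#           - local: api/pipelines/overview
#             title: Overview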
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ['text']
    outputs = ['audio']

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 281 |
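# Hedged sketch of the same text-to-speech flow outside the tool wrapper, using
# the public SpeechT5 classes with the checkpoint names seen above:
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
inputs = processor(text="Hello, world.", return_tensors="pt")
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor at 16 kHz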
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _snake_case['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _snake_case['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _snake_case['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _snake_case, module_spec=__spec__)
| 369 |
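# Hedged usage sketch for the Conditional DETR classes exported above; the
# checkpoint is the public `microsoft/conditional-detr-resnet-50`, and the
# post-processing call follows the standard DETR-style image-processor API:
import torch
from PIL import Image
from transformers import ConditionalDetrForObjectDetection, ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("example.jpg")  # any local RGB image (path is a placeholder)
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
print(results["labels"], results["scores"])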
'''simple docstring'''
def nand_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) != 0)
def test_nand_gate() -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 199 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTest(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(F'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(F'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCamelCase :Any = Accelerator()
lowerCamelCase :Dict = (accelerator.state.process_index + 2, 1_0)
lowerCamelCase :str = torch.randint(0, 1_0, shape).to(accelerator.device)
lowerCamelCase :Any = ''''''
lowerCamelCase :List[Any] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCamelCase :str = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCamelCase :Any = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 206 |
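# Hedged note: the tests and the __main__ block above only exercise multi-process
# behaviour when launched under a distributed launcher, e.g. (file name assumed):
#
#   torchrun --nproc_per_node=2 test_multigpu.py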
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F'Unable to understand extra arguments {args}')
        return super().__call__(sequences, **kwargs)
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : Any = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Dict = inputs["""candidate_label"""]
A_ : Any = inputs["""sequence"""]
A_ : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : Optional[int] = self.model(**lowercase )
A_ : Optional[Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        } | 206 | 1 |
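# Hedged usage sketch for the zero-shot pipeline above via the public factory;
# the NLI checkpoint is a common public choice, not mandated by this file:
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
    hypothesis_template="This example is {}.",
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first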
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """simple docstring"""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """simple docstring"""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 350 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0)

def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 142 | 0 |
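# Note (hedged): `bfs_shortest_path` runs in O(V + E) for V nodes and E edges,
# and `generate_all_subsequences` enumerates all 2**n subsequences, so the demo
# input [3, 1, 2, 4] prints 16 lists (including the empty one).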