"""simple docstring"""
def _A ( lowercase ):
"""simple docstring"""
a =int(lowercase )
if n_element < 1:
a =ValueError('''a should be a positive number''' )
raise my_error
a =[1]
a , a , a =(0, 0, 0)
a =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCamelCase_ : List[str] = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCamelCase_ : List[Any] = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""") | 81 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : str = False
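# Usage sketch (illustrative addition, not part of the original module; the import
# path assumes the module lives at datasets/utils/logging.py):
#
#   from datasets.utils import logging as ds_logging
#
#   ds_logging.set_verbosity_info()            # root "datasets" logger now at INFO
#   logger = ds_logging.get_logger(__name__)   # child logger for the current module
#   ds_logging.disable_progress_bar()          # tqdm(...) now returns EmptyTqdm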
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
# From PyTorch internals
def to_2tuple(x):
    """Expand a single value into a (height, width) pair; pass iterables through."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
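# Shape sketch (illustrative addition, not part of the test file). A dual encoder
# scores every image against every text, so with 1 image and 2 texts as above,
# logits_per_image is (1, 2) and logits_per_text is its transpose. The embedding
# width of 512 below is a hypothetical projection_dim:
#
#   import numpy as np
#   image_embeds = np.random.randn(1, 512)
#   text_embeds = np.random.randn(2, 512)
#   logits_per_text = text_embeds @ image_embeds.T   # shape (2, 1)
#   logits_per_image = logits_per_text.T             # shape (1, 2)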
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
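# Usage sketch (illustrative addition, not part of the original file):
#
#   config = YolosConfig()          # ViT-style backbone plus detection tokens
#   config.image_size               # [512, 864] (rectangular default)
#   config.num_detection_tokens     # 100 learned detection tokens appended to the patch sequence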
"""VisualBERT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a VisualBERT model."""

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
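# Round-trip check (illustrative addition, not part of the original script): the
# "reverse" helpers undo the forward reordering, e.g. for a 1-D norm parameter:
#
#   x = torch.arange(8)
#   y = correct_unfold_norm_order(x)   # tensor([0, 4, 2, 6, 1, 5, 3, 7])
#   assert torch.equal(reverse_correct_unfold_norm_order(y), x)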
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '▁'
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCAmelCase = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
__UpperCAmelCase = {
'xlm-roberta-base': 5_12,
'xlm-roberta-large': 5_12,
'xlm-roberta-large-finetuned-conll02-dutch': 5_12,
'xlm-roberta-large-finetuned-conll02-spanish': 5_12,
'xlm-roberta-large-finetuned-conll03-english': 5_12,
'xlm-roberta-large-finetuned-conll03-german': 5_12,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = VOCAB_FILES_NAMES
UpperCAmelCase_ :int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ :int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCAmelCase_ :Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCAmelCase_ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
lowerCAmelCase_ :Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase_ :Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ :List[str] = 1
lowerCAmelCase_ :int = len(self.sp_model ) + self.fairseq_offset
lowerCAmelCase_ :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
lowerCAmelCase_ :List[str] = self.__dict__.copy()
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :str = {}
lowerCAmelCase_ :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ :Union[str, Any] = [self.cls_token_id]
lowerCAmelCase_ :List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ :Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , __A ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ :Optional[Any] = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :List[str] = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Tuple = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
lowerCAmelCase_ :int = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
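# Offset sketch (illustrative addition, not part of the original file; the vocab
# path below is a placeholder). fairseq reserves ids 0-3 for <s>, <pad>, </s> and
# <unk>, so every SentencePiece id is shifted by fairseq_offset == 1:
#
#   tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   tok._convert_token_to_id("<s>")     # 0, from the fairseq table
#   tok._convert_token_to_id("▁the")    # sp_model.PieceToId("▁the") + 1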
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
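# Why checking 6k +/- 1 suffices (illustrative note, not part of the original
# file): any integer is 6k + r with r in {0, ..., 5}; r in {0, 2, 4} gives an even
# number and r == 3 a multiple of 3, so every prime > 3 has r == 1 or r == 5.
# The loop over i = 5, 11, 17, ... testing i and i + 2 covers exactly those
# candidates up to sqrt(number).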
"""Shared constants for checkpointing, SageMaker, FSDP, DeepSpeed and torch.distributed launches."""
import operator as op

SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
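# Usage sketch (illustrative addition, not part of the original file): the string
# -> operator map lets version requirements expressed as text be evaluated:
#
#   from packaging import version
#   compare = STR_OPERATION_TO_FUNC[">="]
#   compare(version.parse("2.0.1"), version.parse(FSDP_PYTORCH_VERSION))   # True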
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = """MobileNetV1Config"""
# Base docstring
lowerCamelCase__ = """google/mobilenet_v1_1.0_224"""
lowerCamelCase__ = [1, 1_024, 7, 7]
# Image classification docstring
lowerCamelCase__ = """google/mobilenet_v1_1.0_224"""
lowerCamelCase__ = """tabby, tabby cat"""
lowerCamelCase__ = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
__lowerCAmelCase : List[str] = {}
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = model.mobilenet_va
else:
__lowerCAmelCase : List[Any] = model
__lowerCAmelCase : Any = 'MobilenetV1/Conv2d_0/'
__lowerCAmelCase : List[str] = backbone.conv_stem.convolution.weight
__lowerCAmelCase : Optional[int] = backbone.conv_stem.normalization.bias
__lowerCAmelCase : Dict = backbone.conv_stem.normalization.weight
__lowerCAmelCase : Optional[int] = backbone.conv_stem.normalization.running_mean
__lowerCAmelCase : str = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__lowerCAmelCase : Union[str, Any] = i + 1
__lowerCAmelCase : List[str] = i * 2
__lowerCAmelCase : List[Any] = backbone.layer[pt_index]
__lowerCAmelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
__lowerCAmelCase : Any = pointer.convolution.weight
__lowerCAmelCase : Tuple = pointer.normalization.bias
__lowerCAmelCase : Tuple = pointer.normalization.weight
__lowerCAmelCase : List[str] = pointer.normalization.running_mean
__lowerCAmelCase : Dict = pointer.normalization.running_var
__lowerCAmelCase : int = backbone.layer[pt_index + 1]
__lowerCAmelCase : str = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
__lowerCAmelCase : Union[str, Any] = pointer.convolution.weight
__lowerCAmelCase : int = pointer.normalization.bias
__lowerCAmelCase : Tuple = pointer.normalization.weight
__lowerCAmelCase : List[Any] = pointer.normalization.running_mean
__lowerCAmelCase : Tuple = pointer.normalization.running_var
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__lowerCAmelCase : Optional[Any] = model.classifier.weight
__lowerCAmelCase : List[str] = model.classifier.bias
return tf_to_pt_map
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__lowerCAmelCase : List[str] = tf.train.list_variables(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = {}
for name, shape in init_vars:
logger.info(F"Loading TF weight {name} with shape {shape}" )
__lowerCAmelCase : Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Dict = array
# Build TF to PyTorch weights loading map
__lowerCAmelCase : Optional[Any] = _build_tf_to_pytorch_map(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"Importing {name}" )
if name not in tf_weights:
logger.info(F"{name} not in tf pre-trained weights, skipping" )
continue
__lowerCAmelCase : Any = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__lowerCAmelCase : Dict = np.transpose(_UpperCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__lowerCAmelCase : Optional[Any] = array.squeeze().transpose()
else:
__lowerCAmelCase : Any = np.transpose(_UpperCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
__lowerCAmelCase : str = torch.from_numpy(_UpperCamelCase )
tf_weights.pop(_UpperCamelCase , _UpperCamelCase )
tf_weights.pop(name + '/RMSProp' , _UpperCamelCase )
tf_weights.pop(name + '/RMSProp_1' , _UpperCamelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , _UpperCamelCase )
logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = features.shape[-2:]
__lowerCAmelCase , __lowerCAmelCase : Dict = conv_layer.stride
__lowerCAmelCase , __lowerCAmelCase : int = conv_layer.kernel_size
if in_height % stride_height == 0:
__lowerCAmelCase : Dict = max(kernel_height - stride_height , 0 )
else:
__lowerCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__lowerCAmelCase : Tuple = max(kernel_width - stride_width , 0 )
else:
__lowerCAmelCase : Tuple = max(kernel_width - (in_width % stride_width) , 0 )
__lowerCAmelCase : Optional[Any] = pad_along_width // 2
__lowerCAmelCase : int = pad_along_width - pad_left
__lowerCAmelCase : str = pad_along_height // 2
__lowerCAmelCase : Tuple = pad_along_height - pad_top
__lowerCAmelCase : Optional[int] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCamelCase , _UpperCamelCase , 'constant' , 0.0 )
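# Worked example (illustrative comment, not part of the original file): for a
# 224x224 input, 3x3 kernel and stride 2, 224 % 2 == 0, so pad_along_height =
# pad_along_width = max(3 - 2, 0) = 1, giving padding (left, right, top, bottom)
# = (0, 1, 0, 1) -- the odd pixel goes on the right/bottom, as in TF "SAME".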
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , ):
super().__init__()
__lowerCAmelCase : Any = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
__lowerCAmelCase : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__lowerCAmelCase : Optional[int] = nn.Convad(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , padding_mode='zeros' , )
if use_normalization:
__lowerCAmelCase : Dict = nn.BatchNormad(
num_features=_SCREAMING_SNAKE_CASE , eps=config.layer_norm_eps , momentum=0.9997 , affine=_SCREAMING_SNAKE_CASE , track_running_stats=_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : int = None
if use_activation:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                __lowerCAmelCase : List[Any] = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , _SCREAMING_SNAKE_CASE ):
                __lowerCAmelCase : List[Any] = ACT2FN[config.hidden_act]
else:
__lowerCAmelCase : List[Any] = config.hidden_act
else:
__lowerCAmelCase : Any = None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.config.tf_padding:
__lowerCAmelCase : Dict = apply_tf_padding(_SCREAMING_SNAKE_CASE , self.convolution )
__lowerCAmelCase : str = self.convolution(_SCREAMING_SNAKE_CASE )
if self.normalization is not None:
__lowerCAmelCase : Union[str, Any] = self.normalization(_SCREAMING_SNAKE_CASE )
if self.activation is not None:
__lowerCAmelCase : Optional[Any] = self.activation(_SCREAMING_SNAKE_CASE )
return features
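# --- Usage sketch (added) for the convolution block above -------------------
# A minimal stand-in config is enough to exercise the layer; SimpleNamespace
# replaces the full MobileNetV1 config, and the class is assumed to be
# exported as MobileNetVaConvLayer (the name the code further below uses).
def _illustrate_conv_layer():
    import torch
    from types import SimpleNamespace

    cfg = SimpleNamespace(tf_padding=True, layer_norm_eps=0.001, hidden_act="relu6")
    layer = MobileNetVaConvLayer(cfg, 3, 8, 3, 2)  # config, in, out, kernel, stride
    out = layer(torch.randn(1, 3, 224, 224))
    assert out.shape == (1, 8, 112, 112)  # TF "SAME" padding halves 224 exactly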
class A__ ( _lowerCamelCase):
A_ : int = MobileNetVaConfig
A_ : Optional[int] = load_tf_weights_in_mobilenet_va
A_ : int = 'mobilenet_v1'
A_ : Dict = 'pixel_values'
A_ : int = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        if isinstance(_SCREAMING_SNAKE_CASE , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
        elif isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNorm2d ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCamelCase__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , _lowerCamelCase , )
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
super().__init__(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = config
__lowerCAmelCase : Optional[Any] = 32
__lowerCAmelCase : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth )
__lowerCAmelCase : Optional[Any] = MobileNetVaConvLayer(
_SCREAMING_SNAKE_CASE , in_channels=config.num_channels , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=3 , stride=2 , )
__lowerCAmelCase : str = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__lowerCAmelCase : List[str] = nn.ModuleList()
for i in range(13 ):
__lowerCAmelCase : List[str] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__lowerCAmelCase : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=3 , stride=strides[i] , groups=_SCREAMING_SNAKE_CASE , ) )
self.layer.append(
MobileNetVaConvLayer(
_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=1 , ) )
        __lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__lowerCAmelCase : List[str] = self.conv_stem(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__lowerCAmelCase : Optional[int] = layer_module(_SCREAMING_SNAKE_CASE )
if output_hidden_states:
__lowerCAmelCase : int = all_hidden_states + (hidden_states,)
__lowerCAmelCase : int = hidden_states
if self.pooler is not None:
__lowerCAmelCase : Optional[int] = torch.flatten(self.pooler(_SCREAMING_SNAKE_CASE ) , start_dim=1 )
else:
__lowerCAmelCase : Optional[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE , )
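# --- Usage sketch (added) for the backbone above -----------------------------
# Assumes MobileNetVaConfig/MobileNetVaModel are importable under the names the
# surrounding code already refers to; the 224x224 shape matches the docstring.
def _illustrate_backbone_forward():
    import torch

    config = MobileNetVaConfig()
    model = MobileNetVaModel(config).eval()
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    # With the default depth_multiplier of 1.0, a 224x224 input yields a
    # (1, 1024, 7, 7) feature map and a (1, 1024) pooled vector.
    print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)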
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , _lowerCamelCase , )
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE ):
super().__init__(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = config.num_labels
__lowerCAmelCase : Union[str, Any] = MobileNetVaModel(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__lowerCAmelCase : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = nn.Linear(_SCREAMING_SNAKE_CASE , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : int = self.mobilenet_va(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase : Dict = self.classifier(self.dropout(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase : Optional[int] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase : Optional[int] = 'single_label_classification'
else:
__lowerCAmelCase : int = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowerCAmelCase : Tuple = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCAmelCase : Dict = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase : Tuple = CrossEntropyLoss()
__lowerCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase : Tuple = BCEWithLogitsLoss()
__lowerCAmelCase : List[Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not return_dict:
__lowerCAmelCase : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
        loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , )
| 86 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
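# --- Illustration (added): patch grid implied by the defaults above ---------
# `ASTConfig` is the assumed export name for this class. With 16x16 patches
# taken at strides (frequency_stride=10, time_stride=10) over a
# (max_length=1024) x (num_mel_bins=128) spectrogram, the grid is 101 x 12,
# i.e. 1212 patches, as in the AST paper.
def _illustrate_ast_patch_grid():
    config = ASTConfig()
    time_dim = (config.max_length - config.patch_size) // config.time_stride + 1
    freq_dim = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
    assert (time_dim, freq_dim) == (101, 12)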
| 25 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any):
return (abs(source - target) / target) < 0.01
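# Illustration (added): the helper above (called `is_apercent_close` below)
# treats two byte counts as equal when they differ by less than 1% of the
# target, e.g.:
#   is_apercent_close(2_351_563, 2_360_000)  -> True   (~0.36% apart)
#   is_apercent_close(2_351_563, 2_500_000)  -> False  (~5.9% apart)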
@pytest.mark.integration
def lowercase_ ( _lowerCamelCase : Any):
lowercase__ : Optional[int] = _TestCommandArgs(dataset=_lowerCamelCase , all_configs=_lowerCamelCase , save_infos=_lowerCamelCase)
lowercase__ : int = TestCommand(*_lowerCamelCase)
test_command.run()
lowercase__ : Dict = os.path.join(_lowerCamelCase , "README.md")
assert os.path.exists(_lowerCamelCase)
lowercase__ : str = DatasetInfosDict.from_directory(_lowerCamelCase)
lowercase__ : Dict = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
})
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase__ , lowercase__ : Union[str, Any] = getattr(dataset_infos["default"] , _lowerCamelCase), getattr(expected_dataset_infos["default"] , _lowerCamelCase)
if key == "num_bytes":
assert is_apercent_close(_lowerCamelCase , _lowerCamelCase)
elif key == "splits":
assert list(_lowerCamelCase) == list(_lowerCamelCase)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes)
else:
            assert result == expected
| 87 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCAmelCase : Optional[Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
__lowerCAmelCase : List[str] = cvtColor(img, COLOR_BGR2GRAY)
def a__ ( ):
'''simple docstring'''
__magic_name__ = cn.convert_to_negative(A_ )
# assert negative_img array for at least one True
assert negative_img.any()
def a__ ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(A_, 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a__ ( ):
'''simple docstring'''
__magic_name__ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a__ ( ):
'''simple docstring'''
__magic_name__ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__magic_name__ = canny.canny(A_ )
# assert canny array for at least one True
assert canny_array.any()
def a__ ( ):
'''simple docstring'''
assert gg.gaussian_filter(A_, 5, sigma=0.9 ).all()
def a__ ( ):
'''simple docstring'''
__magic_name__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__magic_name__ = conv.img_convolve(A_, A_ ).astype(A_ )
assert res.any()
def a__ ( ):
'''simple docstring'''
assert med.median_filter(A_, 3 ).any()
def a__ ( ):
'''simple docstring'''
__magic_name__ , __magic_name__ = sob.sobel_filter(A_ )
assert grad.any() and theta.any()
def a__ ( ):
'''simple docstring'''
__magic_name__ = sp.make_sepia(A_, 20 )
assert sepia.all()
def a__ ( A_ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
__magic_name__ = bs.Burkes(imread(A_, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def a__ ( A_ = "digital_image_processing/image_data/lena_small.jpg", ):
'''simple docstring'''
__magic_name__ = rs.NearestNeighbour(imread(A_, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def a__ ( ):
'''simple docstring'''
__magic_name__ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
__magic_name__ = imread(A_, 0 )
# Test for get_neighbors_pixel function() return not None
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = image[x_coordinate][y_coordinate]
__magic_name__ = lbp.get_neighbors_pixel(
A_, A_, A_, A_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__magic_name__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
__magic_name__ = lbp.local_binary_value(A_, A_, A_ )
assert lbp_image.any()
| 88 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fc1.weight
        SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fc1.bias
        # output
        SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fc2.weight
        SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fc2.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 | 0 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # Round each component to `accuracy` decimal places before comparing,
    # so tiny floating-point residue still counts as the zero vector.
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(point_a: Point3d, point_b: Point3d, point_c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product AB x AC is
    # (numerically) the zero vector.
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
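# Example usage (added): three points on the line x = y = z are collinear,
# while nudging the last point off the line breaks collinearity.
if __name__ == "__main__":
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.5))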
| 89 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
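# --- Sanity check (added) ----------------------------------------------------
# Every list above is bound to the same placeholder name in this dump; they
# look like predefined fast/smart/super timestep schedules for diffusion
# samplers. Whatever their real names, each one should be a strictly
# decreasing sequence of timesteps ending at 0, which this helper verifies:
def _check_timestep_schedule(schedule):
    assert schedule[-1] == 0
    assert all(a > b for a, b in zip(schedule, schedule[1:])), "must be strictly decreasing"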
| 25 | 0 |
from math import pi, sqrt, tan
def lowerCamelCase_ ( UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__lowerCamelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
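# Note (added): the slant height above is the hypotenuse of the frustum's
# axial cross-section, whose legs are the height and the difference of the
# two radii: slant = sqrt(height**2 + (radius_1 - radius_2)**2).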
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(UpperCamelCase__ , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__lowerCamelCase = (sidea + sidea + sidea) / 2
__lowerCamelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 90 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 25 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "blip_text_model"
def __init__( self : Tuple , lowercase_ : str=30524 , lowercase_ : Union[str, Any]=768 , lowercase_ : Optional[Any]=768 , lowercase_ : Dict=3072 , lowercase_ : Optional[Any]=768 , lowercase_ : Any=12 , lowercase_ : Dict=8 , lowercase_ : str=512 , lowercase_ : Any="gelu" , lowercase_ : str=1e-12 , lowercase_ : List[str]=0.0 , lowercase_ : Any=0.0 , lowercase_ : List[str]=0.02 , lowercase_ : int=30522 , lowercase_ : Any=2 , lowercase_ : Optional[int]=0 , lowercase_ : Union[str, Any]=102 , lowercase_ : Optional[Any]=True , lowercase_ : str=True , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = encoder_hidden_size
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : str = projection_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_decoder
SCREAMING_SNAKE_CASE_ : Optional[int] = use_cache
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = cls.get_config_dict(lowercase_ , **lowercase_)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''') == "blip":
SCREAMING_SNAKE_CASE_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase_ , **lowercase_)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "blip_vision_model"
def __init__( self : Optional[Any] , lowercase_ : Tuple=768 , lowercase_ : Tuple=3072 , lowercase_ : Tuple=512 , lowercase_ : int=12 , lowercase_ : Optional[int]=12 , lowercase_ : List[Any]=384 , lowercase_ : List[str]=16 , lowercase_ : Any="gelu" , lowercase_ : Optional[int]=1e-5 , lowercase_ : List[str]=0.0 , lowercase_ : List[str]=1e-10 , **lowercase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = projection_dim
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE_ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = cls.get_config_dict(lowercase_ , **lowercase_)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''') == "blip":
SCREAMING_SNAKE_CASE_ : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase_ , **lowercase_)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "blip"
__UpperCamelCase = True
def __init__( self : List[Any] , lowercase_ : str=None , lowercase_ : List[str]=None , lowercase_ : Tuple=512 , lowercase_ : Optional[Any]=2.65_92 , lowercase_ : Dict=256 , **lowercase_ : Tuple , ):
'''simple docstring'''
super().__init__(**lowercase_)
if text_config is None:
SCREAMING_SNAKE_CASE_ : Any = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''')
if vision_config is None:
SCREAMING_SNAKE_CASE_ : Dict = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''')
SCREAMING_SNAKE_CASE_ : List[str] = BlipTextConfig(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = BlipVisionConfig(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = projection_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logit_scale_init_value
SCREAMING_SNAKE_CASE_ : List[Any] = 1.0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.02
SCREAMING_SNAKE_CASE_ : List[str] = image_text_hidden_size
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : BlipTextConfig , lowercase_ : BlipVisionConfig , **lowercase_ : Any):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ : str = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__class__.model_type
return output
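# --- Usage sketch (added) ----------------------------------------------------
# Compose the top-level config from explicit sub-configs via the classmethod
# above. `BlipConfig` is the assumed export name for this class; the text and
# vision config classes are already referred to by name in __init__.
def _illustrate_blip_config():
    text_config = BlipTextConfig()
    vision_config = BlipVisionConfig()
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    assert config.projection_dim == 512  # default set in __init__ above
    assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size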
| 91 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case )
Path(_snake_case ).open("""w""" ).writelines(_snake_case )
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
| 25 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = jnp.ones((batch_size, length) ) / length
return scores
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 2_0
__lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=_A )
# tweak scores to not be uniform anymore
__lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCAmelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCAmelCase = jax.nn.softmax(_A , axis=-1 )
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(_A , scores.copy() , cur_len=_A ) , axis=-1 )
__lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(_A , scores.copy() , cur_len=_A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 1_0
__lowerCAmelCase = 2
# create ramp distribution
__lowerCAmelCase = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = top_k_warp(_A , _A , cur_len=_A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCAmelCase = 5
__lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCAmelCase = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, length) ).copy()
__lowerCAmelCase = top_k_warp_safety_check(_A , _A , cur_len=_A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = 1_0
__lowerCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCAmelCase = np.exp(top_p_warp(_A , _A , cur_len=_A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__lowerCAmelCase = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCAmelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCAmelCase = top_p_warp(_A , _A , cur_len=_A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 2_0
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A )
# check that min length is applied at length 5
__lowerCAmelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
__lowerCAmelCase = 5
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = min_dist_processor(_A , _A , cur_len=_A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = 1_5
__lowerCAmelCase = min_dist_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 2_0
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
# check that all scores are -inf except the bos_token_id score
__lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
__lowerCAmelCase = 1
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = logits_processor(_A , _A , cur_len=_A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCAmelCase = 3
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = logits_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 2_0
__lowerCAmelCase = 4
__lowerCAmelCase = 0
__lowerCAmelCase = 5
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
__lowerCAmelCase = 4
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = logits_processor(_A , _A , cur_len=_A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCAmelCase = 3
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = logits_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 4
__lowerCAmelCase = 1_0
__lowerCAmelCase = 1_5
__lowerCAmelCase = 2
__lowerCAmelCase = 1
__lowerCAmelCase = 1_5
# dummy input_ids and scores
__lowerCAmelCase = ids_tensor((batch_size, sequence_length) , _A )
__lowerCAmelCase = input_ids.copy()
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = scores.copy()
# instantiate all dist processors
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A )
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
__lowerCAmelCase = 1_0
# no processor list
__lowerCAmelCase = temp_dist_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = top_k_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = top_p_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = min_dist_proc(_A , _A , cur_len=_A )
__lowerCAmelCase = bos_dist_proc(_A , _A , cur_len=_A )
__lowerCAmelCase = eos_dist_proc(_A , _A , cur_len=_A )
# with processor list
__lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase = processor(_A , _A , cur_len=_A )
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 4
__lowerCAmelCase = 1_0
__lowerCAmelCase = 1_5
__lowerCAmelCase = 2
__lowerCAmelCase = 1
__lowerCAmelCase = 1_5
# dummy input_ids and scores
__lowerCAmelCase = ids_tensor((batch_size, sequence_length) , _A )
__lowerCAmelCase = input_ids.copy()
__lowerCAmelCase = self._get_uniform_logits(_A , _A )
__lowerCAmelCase = scores.copy()
# instantiate all dist processors
__lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A )
__lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
__lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
__lowerCAmelCase = 1_0
# no processor list
def run_no_processor_list(_A , _A , _A ):
__lowerCAmelCase = temp_dist_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = top_k_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = top_p_warp(_A , _A , cur_len=_A )
__lowerCAmelCase = min_dist_proc(_A , _A , cur_len=_A )
__lowerCAmelCase = bos_dist_proc(_A , _A , cur_len=_A )
__lowerCAmelCase = eos_dist_proc(_A , _A , cur_len=_A )
return scores
# with processor list
def run_processor_list(_A , _A , _A ):
__lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase = processor(_A , _A , cur_len=_A )
return scores
__lowerCAmelCase = jax.jit(_A )
__lowerCAmelCase = jax.jit(_A )
__lowerCAmelCase = jitted_run_no_processor_list(_A , _A , _A )
__lowerCAmelCase = jitted_run_processor_list(_A , _A , _A )
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
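# Hedged usage sketch for the processors exercised above; the shapes and
# values below are illustrative assumptions, not taken from the tests.
def _example_warp_scores():
    input_ids = jnp.ones((2, 4), dtype="i4")
    scores = jnp.zeros((2, 10))
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    # Every processor/warper is called as processor(input_ids, scores, cur_len=...)
    return processors(input_ids, scores, cur_len=4)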
| 92 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
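# Minimal sketch of how such placeholders are typically substituted into the
# generated notebook cells (the helper below is an assumption, not part of the
# conversion tooling shown here):
def _apply_placeholders(text, mapping):
    for placeholder, value in mapping.items():
        text = text.replace(placeholder, value)
    return text


# Applying it to "{processor_class}.from_pretrained(...)" with the mapping
# above would yield "FakeProcessorClass.from_pretrained(...)".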
| 25 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowercase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowercase : Union[str, Any] = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase_ : str = self.diffusers_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : str = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase_ : Union[str, Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase_ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase_ : Union[str, Any] = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
lowercase_ : List[str] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''DDPM''' , '''Test''' , __SCREAMING_SNAKE_CASE ) , )
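# For context, a hedged sketch of the convention these tests exercise: a body
# marked "# Copied from <module>.<Object> [with Old->New]" must stay identical
# (after black formatting and the optional name substitution) to its source.
# Hypothetical marker as it would appear in library code:
#
# # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
# class DDIMSchedulerOutput(BaseOutput):
#     ...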
| 93 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if not isinstance(_snake_case ,int ):
        raise TypeError("""Input value must be a 'int' type""" )
    elif _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
return bin(_snake_case ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
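# Usage sketch for the set-bit counter above (the function keeps the name
# `lowercase_` used in this file); Kernighan's variant is shown for comparison
# and is an illustrative addition, not part of the original module.
def _popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clears the lowest set bit each iteration
        count += 1
    return count


# lowercase_(25) == _popcount_kernighan(25) == 3, since 25 == 0b11001.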
| 25 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_12,
type=int,
help=(
        '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __lowerCamelCase ( UpperCAmelCase_ : List[str] ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
snake_case : Dict = parser.parse_args()
snake_case : int = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
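# Hedged invocation sketch for the script above; the script filename,
# checkpoint, YAML config and output paths below are placeholders, not files
# from this repository:
#
#   python convert_original_controlnet_to_diffusers.py \
#     --checkpoint_path ./control_sd15_canny.pth \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-canny \
#     --to_safetensors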
| 94 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
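# Minimal usage sketch for the pipeline class above (the checkpoint name is an
# illustrative assumption):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("path/to/image.png")
#   result["depth"]            # PIL.Image visualization of the depth map
#   result["predicted_depth"]  # raw torch.Tensor predictions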
| 25 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Union[str, Any] =FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
a__ : List[str] =AutoTokenizer.from_pretrained("xlm-roberta-base" )
a__ : List[str] ="The dog is cute and lives in the garden house"
a__ : str =jnp.array([tokenizer.encode(lowerCAmelCase__ )] )
a__ : List[Any] =(1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
a__ : Optional[int] =jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
a__ : Dict =model(lowerCAmelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , lowerCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowerCAmelCase__ , atol=1E-3 ) )
| 95 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
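# Hedged end-to-end sketch of stage 1 of the cascade exercised above (fp16
# variant as in the tests; illustrative, not an exact reproduction of the
# test flow):
#
#   stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   stage_1.enable_model_cpu_offload()
#   prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
#   image = stage_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images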
| 25 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """mobilenet_v1"""
def __init__( self , lowercase=3 , lowercase=224 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.9_99 , lowercase=0.02 , lowercase=0.0_01 , **lowercase , ):
super().__init__(**lowercase )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Any = image_size
_lowerCamelCase : str = depth_multiplier
_lowerCamelCase : Dict = min_depth
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Union[str, Any] = tf_padding
_lowerCamelCase : str = classifier_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : int = layer_norm_eps
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = version.parse("""1.11""" )
@property
def A_ ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def A_ ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def A_ ( self ):
        return 1E-4
 | 96 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ : int = Accelerator()
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 25 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def a ( __a , __a ) -> Union[str, Any]:
'''simple docstring'''
for e in env_keys:
UpperCamelCase__ :Optional[int] = int(os.environ.get(__a , -1 ) )
if val >= 0:
return val
return default
def a ( __a , __a=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = os.environ.get(__a , str(__a ) )
return strtobool(__a ) == 1 # As its name indicates `strtobool` actually returns an int...
def a ( __a , __a="no" ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = os.environ.get(__a , str(__a ) )
    return value
 | 97 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
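# In released `accelerate` this module backs `load_and_quantize_model`; a
# hedged usage sketch (the model class and checkpoint path are placeholder
# assumptions):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical architecture
#   model = load_and_quantize_model(
#       empty_model, weights_location="path/to/checkpoint", bnb_quantization_config=bnb_config, device_map="auto"
#   )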
| 25 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = TextaTextGenerationPipeline(model=lowerCamelCase__ ,tokenizer=lowerCamelCase__ )
return generator, ["Something to write", "Something else"]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ):
UpperCAmelCase__ = generator('Something there' )
self.assertEqual(lowerCamelCase__ ,[{'generated_text': ANY(lowerCamelCase__ )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
UpperCAmelCase__ = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ ,[
[{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
[{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
] ,)
UpperCAmelCase__ = generator(
['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ ,[
[{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
[{'generated_text': ANY(lowerCamelCase__ )}, {'generated_text': ANY(lowerCamelCase__ )}],
] ,)
with self.assertRaises(lowerCamelCase__ ):
generator(4 )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
# do_sample=False necessary for reproducibility
UpperCAmelCase__ = generator('Something there' ,do_sample=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,[{'generated_text': ''}] )
UpperCAmelCase__ = 3
UpperCAmelCase__ = generator(
'Something there' ,num_return_sequences=lowerCamelCase__ ,num_beams=lowerCamelCase__ ,)
UpperCAmelCase__ = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = generator('This is a test' ,do_sample=lowerCamelCase__ ,num_return_sequences=2 ,return_tensors=lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ ,[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] ,)
UpperCAmelCase__ = generator.model.config.eos_token_id
UpperCAmelCase__ = '<pad>'
UpperCAmelCase__ = generator(
['This is a test', 'This is a second test'] ,do_sample=lowerCamelCase__ ,num_return_sequences=2 ,batch_size=2 ,return_tensors=lowerCamelCase__ ,)
self.assertEqual(
lowerCamelCase__ ,[
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] ,)
@require_tf
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
# do_sample=False necessary for reproducibility
UpperCAmelCase__ = generator('Something there' ,do_sample=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,[{'generated_text': ''}] )
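# Plain usage sketch mirroring the pipelines tested above (the checkpoint is
# the same tiny test model; the exact output text is illustrative):
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("translate English to German: Hello", do_sample=False)
#   # -> [{'generated_text': ...}]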
| 98 |
"""simple docstring"""
def lowercase_ ( texta ,textb ):
    if not (isinstance(texta ,str ) and isinstance(textb ,str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    texta_length = len(texta )
    textb_length = len(textb )
    dp = [[0] * (textb_length + 1) for _ in range(texta_length + 1 )]
    ans_length = 0
    ans_index = 0
    for i in range(1 ,texta_length + 1 ):
        for j in range(1 ,textb_length + 1 ):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
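# Example runs for the function above (kept under the name `lowercase_` used
# in this file):
#
#   lowercase_("abcdxyz" ,"xyzabcd")  # -> "abcd"
#   lowercase_("" ,"abc")             # -> ""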
| 25 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A_ ( A__ ) -> tuple:
return (data["data"], data["target"])
def A_ ( A__ , A__ , A__ ) -> np.ndarray:
a__ : int = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(A__ , A__ )
# Predict target for test data
a__ : Union[str, Any] = xgb.predict(A__ )
a__ : Optional[Any] = predictions.reshape(len(A__ ) , 1 )
return predictions
def A_ ( ) -> None:
a__ : List[str] = fetch_california_housing()
a__ , a__ : int = data_handling(A__ )
a__ , a__ , a__ , a__ : Optional[int] = train_test_split(
A__ , A__ , test_size=0.25 , random_state=1 )
a__ : Optional[Any] = xgboost(A__ , A__ , A__ )
# Error printing
print(F'Mean Absolute Error : {mean_absolute_error(A__ , A__ )}' )
print(F'Mean Square Error : {mean_squared_error(A__ , A__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
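# Optional follow-up (an assumption, not part of main() above): RMSE is often
# easier to read than MSE since it is in the target's units. With recent
# scikit-learn this is mean_squared_error(y_test, predictions, squared=False),
# or equivalently np.sqrt(mean_squared_error(y_test, predictions)).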
| 99 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( box ,width ,height ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
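# Worked example of the 0-1000 normalisation above (a hand-checked sketch): a
# box (left=40, top=30, right=120, bottom=90) on an 800x600 page becomes
#
#   normalize_box([40, 30, 120, 90], 800, 600) == [50, 50, 150, 150]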
class LayoutLMvaImageProcessor(BaseImageProcessor):
    """Constructs a LayoutLMv2 image processor: resizes document images, flips
    RGB to BGR, and optionally runs Tesseract OCR to extract words + boxes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images, optionally running OCR."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
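# A minimal usage sketch (assumes Pillow and pytesseract are installed; the
# file name is hypothetical, and calling the instance relies on the base
# class delegating `__call__` to `preprocess`):
#
#   from PIL import Image
#
#   image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   encoding = image_processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(encoding.pixel_values.shape)  # (1, 3, 224, 224)
#   print(encoding.words, encoding.boxes)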
| 25 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
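# Quick illustration of the schema inference exercised above (a sketch,
# assuming the `datasets` library is installed): the first record fixes the
# columns, which is why the uneven-records test sees `col_1: None`.
#
#   ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#   print(ds.column_names)  # ['col_1']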
| 100 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via a radix-2 discrete Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest power first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
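    # Usage sketch: (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3,
    # worked out by hand, so the printed product should match those
    # coefficients (possibly rendered as complex numbers with zero imag part).
    print(FFT([1, 2, 3], [4, 5]))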
| 25 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) score-based SDE scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
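# Orientation sketch (not an API): `step_pred` above performs one
# Euler-Maruyama step of the reverse-time VP-SDE, i.e.
#
#   beta(t) = beta_min + t * (beta_max - beta_min)
#   drift   = -0.5 * beta(t) * x - beta(t) * score        (score rescaled by std)
#   x_mean  = x + drift * dt                              (deterministic part)
#   x       = x_mean + sqrt(beta(t)) * sqrt(-dt) * z,     z ~ N(0, I)
#
# with dt = -1 / num_inference_steps and time running from 1 down to sampling_eps.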
| 101 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
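    # Illustrative invocation (script name and flags after it are hypothetical):
    #
    #   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 1e-4
    #
    # Everything after the script path is forwarded untouched, plus the extra
    # `--tpu_num_cores` flag, via the patched sys.argv above.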
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc`, x = x_start, x = x_end and the x axis."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: int | float) -> int | float:
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 10_0000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
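    # Sanity sketch (hand-computed): the unsigned area of x^3 + x^2 over
    # [-5, 5] is 938/3 ~= 312.67, which the abs() in trapezoidal_area
    # approximates, so the printed values should converge towards it.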
| 102 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 if both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
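    # XNOR is the complement of XOR; a quick equivalence check (sketch):
    for a in (0, 1):
        for b in (0, 1):
            assert xnor_gate(a, b) == 1 - (a ^ b)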
| 25 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''', rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"""rank_{local_rank}_output.json""")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    ds = SeqaSeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device),
            attention_mask=batch['''attention_mask'''].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'''pred''': pred, '''id''': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
    parser.add_argument('''--data_dir''', type=str, help='''like cnn_dm/test.source''' )
    parser.add_argument(
        '''--model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''', default='''sshleifer/distilbart-xsum-12-3''', )
    parser.add_argument('''--save_dir''', type=str, help='''where to save''', default='''tmp_gen''' )
    parser.add_argument('''--max_source_length''', type=int, default=None )
    parser.add_argument(
        '''--type_path''', type=str, default='''test''', help='''which subset to evaluate typically train/val/test''' )
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''' )
    parser.add_argument(
        '''--local_rank''', type=int, default=-1, required=False, help='''should be passed by distributed.launch''' )
    parser.add_argument(
        '''--n_obs''', type=int, default=None, required=False, help='''How many observations. Defaults to all.''' )
    parser.add_argument(
        '''--num_return_sequences''', type=int, default=1, required=False, help='''How many sequences to return''' )
    parser.add_argument(
        '''--sync_timeout''', type=int, default=600, required=False, help='''How long should master process wait for other processes to finish.''', )
    parser.add_argument('''--src_lang''', type=str, default=None, required=False )
    parser.add_argument('''--tgt_lang''', type=str, default=None, required=False )
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--fp16''', action='''store_true''' )
    parser.add_argument('''--debug''', action='''store_true''' )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
    json_save_dir = Path(args.save_dir + '''_tmp''' )
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json''' ) )
    if intermediate_files:
        raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['''src_lang'''] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['''tgt_lang'''] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''' )
            print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '''.target''' )
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds, labels)
        metrics['''n_obs'''] = len(preds)
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''], 4)
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"""{args.type_path}.target""" ) )
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json''' ) )
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
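    # Illustrative multi-GPU launch (the file name and data path are
    # hypothetical; the model name is the argparse default above):
    #
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir tmp_gen --fp16
    #
    # --local_rank is injected by torch.distributed.launch, matching the
    # argparse help text above.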
| 103 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
_tqdm_active = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 25 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 104 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = '''yolos'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
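# Minimal configuration sketch (values mirror the defaults in the signature above):
#
#   config = YolosConfig()
#   assert config.hidden_size == 768 and config.num_detection_tokens == 100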
| 25 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the histogram of observed states."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
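    # With no gates applied the qubit stays in |0>, so the histogram is
    # deterministically {'0': 1000}. A hedged variation (not in the original
    # file): adding `circuit.h(0)` before the measurement would give roughly
    # {'0': ~500, '1': ~500} over 1000 shots.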
| 105 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["""stage1""", """stage2""", """stage3""", """stage4"""],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        idalabel=idalabel,
        labelaid=labelaid,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
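# A tiny hand-verified sketch of what the reordering above does for a 1-D
# tensor of length 8:
#
#   x = torch.arange(8)            # tensor([0, 1, 2, 3, 4, 5, 6, 7])
#   correct_unfold_norm_order(x)   # tensor([0, 4, 2, 6, 1, 5, 3, 7])
#
# i.e. the four groups of the unfolded patch are interleaved in (0, 2, 1, 3)
# order, and the `reverse_*` helpers undo exactly that permutation.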
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""", file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("""bn""", """batch_norm""" )
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
# fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
        outputs = model(pixel_values)

    logits = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, '''models/bert/''' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/transformers/models/bert/modeling_bert.py''' ), os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''' ), )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = '''src/transformers'''
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, '''new_code.py''' )
        with open(fname, '''w''', newline='''\n''' ) as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''' ) as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', REFERENCE_CODE + '''\n''', )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', re.sub('''Bert''', '''TestModel''', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', F'{long_class_name}LMPredictionHead', re.sub('''Bert''', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', REFERENCE_CODE, overwrite_result=re.sub('''Bert''', '''TestModel''', REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['''format_model_list'''] )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list_sample, localized_readme['''format_model_list'''] )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme['''format_model_list'''] )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 106 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
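# Quick check (illustrative): the 6k +/- 1 loop above suffices because every
# prime p > 3 satisfies p % 6 in (1, 5), so only divisors of that form matter.
#   [n for n in range(20) if is_prime(n)]  ->  [2, 3, 5, 7, 11, 13, 17, 19]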
class PrimesTest(unittest.TestCase):
"""simple docstring"""
    def test_primes(self) -> None:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
"""simple docstring"""
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list element in odd-even transposition sort."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sorts `arr` in place using one process per element."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
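# Complexity note (illustrative): odd-even transposition performs n rounds of
# compare-exchange, so with one process per element the sort finishes in O(n)
# parallel time versus O(n log n) for sequential comparison sorts. Running
# main() prints 10 9 ... 1 followed by 1 2 ... 10.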
| 107 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a positive integer whose only prime
    factors are 2, 3 and 5), using three merge pointers into the sequence."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
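# Example (illustrative): the sequence begins
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so ugly_numbers(10) == 12.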
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, ):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
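# Worked example (illustrative): resizing a 480x640 image toward 384x384 with
# keep_aspect_ratio=True and multiple=32 fits the height (scale 0.8 applied to
# both axes) and snaps each side to a multiple of 32, giving (384, 512).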
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
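# Minimal usage sketch (assumes Pillow and a local image file named "cat.png"):
#   from PIL import Image
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])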
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=10_24,
        num_mel_bins=1_28,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
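# Rough shape intuition (illustrative): with the defaults above, an AST model
# splits a (max_length x num_mel_bins) = (1024 x 128) spectrogram into 16x16
# patches taken every (time_stride, frequency_stride) = (10, 10) frames/bins,
# i.e. about ((1024 - 16) // 10 + 1) * ((128 - 16) // 10 + 1) = 101 * 12 = 1212 patches.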
| 25 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
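# Behaviour sketch (illustrative): get_aligned_output_features_output_indices
# fills in whichever of out_features / out_indices is missing, so that e.g.
# (None, [0, 2], ["a", "b", "c"]) resolves to (["a", "c"], [0, 2]).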
| 109 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
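# Example invocation (illustrative; file names are placeholders):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava_full.pt --codebook_path flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full-hf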
| 25 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f'{src_lang}-{tgt_lang}'
lowercase__ = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    readme = lowercase__  # the model card rendered by the f-string above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 110 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
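# Example invocation (illustrative; paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl --pytorch_dump_folder_path ./xlm-roberta-xl \
#       [--classification_head]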
| 25 | 0 |
def is_sum_subset(arr, required_sum):
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
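# Example (illustrative): is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
# (4 + 5 == 9), while is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False,
# since every subset without 34 sums to at most 26 and 34 alone overshoots.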
| 26 |
"""simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
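# Context note (assumption): the variable names above are inferred from the
# entry counts (27/40/50/100/185 steps); the lists look like predefined
# descending DDPM timestep schedules for few-step sampling, consumed left to
# right so every schedule starts at t=999 and ends at t=0.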
| 25 | 0 |
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
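# Sanity check (from the Project Euler 114 statement): a row of length 7,
# filled with blocks of length at least 3 separated by at least one empty
# square, admits exactly 17 arrangements, and indeed solution(7) == 17.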
| 184 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep="\n")
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
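# Example invocation (illustrative; the script and model paths are placeholders),
# run from the repository root so utils/tf_ops/onnx.json resolves:
#   python check_saved_model_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict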
| 25 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
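# Usage sketch (illustrative; the model id is one example of a depth model):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL image rescaled to 0-255
#   result["predicted_depth"]  # raw depth tensor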
| 172 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
"""simple docstring"""
    def run_eval_tester(self, model):
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        """simple docstring"""
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        """simple docstring"""
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 25 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 292 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
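# --- Added example (not in the original module) ---
# A small sanity check of the helper above: for batch dims (2, 3), flat index
# 4 corresponds to the multi-index (1, 1), the same row-major order numpy's
# `unravel_index` uses.
def _flat_idx_to_idx_demo() -> None:
    assert _flat_idx_to_idx(4, (2, 3)) == (1, 1)
    assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)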
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
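# --- Added example (not in the original module) ---
# Illustration of the slice computation above: selecting flat elements 1..4
# (inclusive) of a tensor with batch dims (2, 3) needs two slices, one for the
# ragged tail of row 0 and one for the ragged head of row 1.
def _minimal_slice_set_demo() -> None:
    slices = _get_minimal_slice_set(start=[0, 1], end=[1, 1], dims=[2, 3])
    # Expected: [(slice(0, 1), slice(1, 3)), (slice(1, 2), slice(0, 2))]
    assert len(slices) == 2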
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
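# --- Added example (not in the original module) ---
# A minimal sketch of chunk_layer: the "layer" here is a plain function over a
# dict of tensors, and the two leading dims (4, 6) are treated as batch dims
# that get flattened and processed 8 rows at a time.
def _chunk_layer_demo() -> None:
    def layer(x: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
        return x * 2 + bias

    inputs = {"x": torch.randn(4, 6, 5), "bias": torch.randn(4, 6, 5)}
    chunked = chunk_layer(layer, inputs, chunk_size=8, no_batch_dims=2)
    full = layer(**inputs)
    assert torch.allclose(chunked, full)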
class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
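# --- Added example (not in the original module) ---
# A sketch of how the tuner is meant to be driven: `fn` must accept a
# `chunk_size` kwarg, and the tuner bisects over powers of two up to
# `max_chunk_size`, re-tuning only when the (shapes of the) args change.
def _chunk_size_tuner_demo() -> None:
    tuner = ChunkSizeTuner(max_chunk_size=512)

    def fn(x: torch.Tensor, chunk_size: int) -> torch.Tensor:
        # A real candidate would raise RuntimeError (OOM) for oversized chunks.
        return x * 2

    x = torch.randn(1024, 8)
    chunk_size = tuner.tune_chunk_size(fn, (x,), min_chunk_size=16)
    assert chunk_size >= 16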
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
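def get_set_bits_count_kernighan(number: int) -> int:
    # Added alternative (not in the original file): Brian Kernighan's trick,
    # which clears the lowest set bit on each iteration, so it runs in
    # O(number of set bits) instead of building the whole binary string.
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count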
if __name__ == "__main__":
import doctest
doctest.testmod()
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1

            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
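# --- Added example (not part of the original file) ---
# A small driver for the classes above: build a weighted graph, make the edge
# weights distinct (Boruvka's algorithm assumes this), and print the MST.
def _boruvka_demo() -> None:
    g = Graph.build(
        vertices=[0, 1, 2, 3],
        edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (0, 3, 4)],
    )
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)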
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
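# --- Added example (not part of the original file) ---
# A hedged sketch of end-user usage via the high-level `pipeline` factory;
# the checkpoint name is illustrative (Intel/dpt-large is a common choice).
def _depth_estimation_demo():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # `result["depth"]` is a PIL image, `result["predicted_depth"]` the raw tensor.
    result["depth"].save("depth.png")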
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts enumerate the same parameters in the same order,
        # so weights are copied over key-by-position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
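# --- Added example (not part of the original test file) ---
# Sketch of the peak-memory measurement pattern the tests above rely on:
# reset the CUDA counters, run a workload, then read the high-water mark.
# Assumes a CUDA device is available.
def _peak_memory_demo() -> None:
    _start_torch_memory_measurement()
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
    mem_bytes = torch.cuda.max_memory_allocated()
    print(f"peak CUDA memory: {mem_bytes / 2**20:.1f} MiB for output {tuple(y.shape)}")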
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
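# --- Added example (not part of the original script) ---
# The manual truncation in the eval loop above can be replaced by
# `Accelerator.gather_for_metrics`, which drops the duplicated samples that
# distributed samplers add to pad the last batch. A hedged sketch:
def _eval_step_with_gather_for_metrics(accelerator, model, batch, metric):
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)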
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=SCREAMING_SNAKE_CASE__ , ) | 160 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location}"
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
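# A minimal usage sketch for the entry point above. In the upstream `accelerate`
# API this helper is exposed as `load_and_quantize_model`; the model class and
# the checkpoint path below are illustrative assumptions, not part of this file:
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel(config)  # hypothetical model class
#     model = load_and_quantize_model(
#         empty_model,
#         bnb_config,
#         weights_location="path/to/checkpoint",  # placeholder path
#         device_map="auto",
#     )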
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
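# A hedged illustration of the kind of custom device map the check above
# validates (module names are hypothetical; real names depend on the model):
#
#     device_map = {"transformer.wte": 0, "transformer.h": 0, "lm_head": "cpu"}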
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on GitHub if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
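# For a typical language model with a tied output head, the helper above is
# expected to return something like ["lm_head"], i.e. modules left in their
# original dtype rather than converted (the exact name depends on the model).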
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 25 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7,
                 is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99,
                 hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6,
                 shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 147 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
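# A couple of quick examples for the helper above (names as restored here):
#
#     longest_common_substring("abcde", "xcdey")  # -> "cde"
#     longest_common_substring("", "abc")         # -> ""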
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
"""simple docstring"""
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang=None, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
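# A quick worked example of the 0-1000 normalization above: for a pixel box
# [15, 30, 60, 90] on a 300x600 (width x height) image,
# normalize_box([15, 30, 60, 90], 300, 600) returns [50, 50, 200, 150].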
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
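# A hedged usage sketch for the processor above (the image file name is
# illustrative; OCR requires Pillow and pytesseract to be installed):
#
#     from PIL import Image
#
#     processor = LayoutLMvaImageProcessor()
#     encoding = processor(Image.open("document.png"), return_tensors="np")
#     # encoding holds "pixel_values" plus OCR "words" and "boxes" when apply_ocr=True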
| 25 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
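# A hedged wiring sketch for the two factories above (argument values are
# illustrative):
#
#     import pytorch_lightning as pl
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback("output_dir/", "rouge2"),
#             get_early_stopping_callback("rouge2", patience=3),
#         ]
#     )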
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
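# A minimal usage sketch for the FFT multiplier above (class name as restored
# here):
#
#     fft = FFT(poly_a=[1, 2], poly_b=[3, 4])
#     print(fft)  # A*B corresponds to (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2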
| 25 | 0 |
import math
import unittest
def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
return True
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 184 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
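# Example invocation (a sketch; the launcher file name, the wrapped training
# script, and its flags are illustrative):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...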
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_00_00_00) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
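# For the default ceiling of 1_000_000 this is expected to return 997651, the
# well-known Project Euler 50 answer (the sum of 543 consecutive primes).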
if __name__ == "__main__":
print(f'''{solution() = }''')
| 172 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
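
# Illustrative sketch (not part of the test file): the vocab/merges written in
# setUp define a miniature byte-level BPE. The helper below is a simplified,
# assumption-laden re-implementation of BPE merging -- not the tokenizer's real
# code path -- showing how "\u0120low" emerges from the ranked merges.
def apply_merges(word, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    while True:
        indexed = [(ranks.get(pair, float("inf")), i) for i, pair in enumerate(zip(word, word[1:]))]
        best_rank, i = min(indexed, default=(float("inf"), -1))
        if best_rank == float("inf"):
            return word
        word = word[:i] + [word[i] + word[i + 1]] + word[i + 2 :]


toy_merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
assert apply_merges(["\u0120", "l", "o", "w"], toy_merges) == ["\u0120low"]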
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
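
# Usage sketch (illustrative, not part of the module): callers get a logger
# plus a tqdm factory that silently no-ops once progress bars are disabled.
if __name__ == "__main__":
    logger = get_logger(__name__)
    set_verbosity_info()
    logger.info("progress bars enabled: %s", is_progress_bar_enabled())
    disable_progress_bar()
    for _ in tqdm(range(3)):  # EmptyTqdm: iterates, draws nothing
        pass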
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
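
# Minimal sketch of the lazy-import pattern used above (an illustrative
# stand-in, not the actual transformers `_LazyModule`): attribute access
# resolves the owning submodule on demand and imports it only at that point.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)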
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (= step / max_step) after which the complex
    number constituted by this x-y-pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
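
# Illustrative spot-checks (not from the original file): the origin belongs to
# the Mandelbrot set and never diverges, so its distance is 1; the point (1, 1)
# escapes on the very first step.
assert get_distance(0, 0, 50) == 1.0
assert get_distance(1, 1, 50) == 0.0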
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black & white color-coding: the Mandelbrot set is black, all other
    points are white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding based on the relative distance; the Mandelbrot set itself
    stays black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a given number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
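
# Cross-check sketch (not part of the original tests): compare is_prime
# against plain trial division for all small inputs.
def _naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(math.sqrt(n)) + 1))


assert all(is_prime(n) == _naive_is_prime(n) for n in range(100))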
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu",
            class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
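
# Alternative sketch (not from the original file): the same sequence can be
# generated with a min-heap plus a seen-set, trading the three index pointers
# for O(log n) pops.
import heapq


def ugly_numbers_heap(n: int) -> int:
    heap, seen = [1], {1}
    for _ in range(n - 1):
        current = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = current * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return heap[0]


assert ugly_numbers_heap(200) == ugly_numbers(200)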
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
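
# Illustrative sketch (not part of the config module; the arithmetic mirrors
# how AST's patch embedding is commonly computed and is an assumption here):
# a patch_size x patch_size window strides over a max_length x num_mel_bins
# spectrogram, which with the defaults above yields 12 * 101 = 1212 patches.
_config = ASTConfig()
_frequency_out = (_config.num_mel_bins - _config.patch_size) // _config.frequency_stride + 1  # 12
_time_out = (_config.max_length - _config.patch_size) // _config.time_stride + 1  # 101
_num_patches = _frequency_out * _time_out  # 1212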
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
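# Example invocation (a sketch; the script name and all paths are hypothetical placeholders):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/flava.pt \
#       --codebook_path /path/to/flava_codebook.pt \
#       --pytorch_dump_folder_path /path/to/output_dir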
| 25 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
# Class, method, and variable names below are assumed from usage and transformers conventions;
# the obfuscation had collapsed them to unusable placeholders.
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
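# Minimal usage sketch outside the test harness (assumes torch and PIL are
# installed; the image file name is a hypothetical placeholder):
#   processor = PoolFormerImageProcessor(crop_pct=0.9)
#   image = Image.open("cat.png")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values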
| 289 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    # Names below are restored from usage; a few target attributes are assumed
    # from the XLM-RoBERTa-XL modeling code.
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq's fc1)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output (fairseq's fc2)
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
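# Example invocation (a sketch; paths are hypothetical placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
# Add --classification_head to also convert the "mnli" classification head.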
| 25 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
                 eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
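# Usage sketch (assumes network access to the Hub; the checkpoint name comes
# from the pretrained map above):
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tokenizer("hello world")["input_ids"]
#   text = tokenizer.decode(ids)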
| 26 |
"""simple docstring"""
# Denoising timestep schedules. Distinct names are assumed below (fast/smart/super
# naming); the obfuscated source gave every list the same placeholder name, so
# each schedule would have silently overwritten the previous one.
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
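# Helper (not used by any consumer of these lists) that checks the invariant
# samplers rely on: each schedule is strictly decreasing and ends at timestep 0.
def _validate_schedules():
    for schedule in (
        fast27_timesteps,
        smart27_timesteps,
        smart50_timesteps,
        smart100_timesteps,
        smart185_timesteps,
        super27_timesteps,
        super40_timesteps,
        super100_timesteps,
    ):
        assert all(a > b for a, b in zip(schedule, schedule[1:]))
        assert schedule[-1] == 0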
| 25 | 0 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # NOTE: fixture names are assumed; the originals were obfuscated away.
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
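# The tests above depend on pytest fixtures defined elsewhere (typically in a
# conftest.py). A minimal sketch of a compatible `sqlite_path` fixture, assuming
# the 4-row / 3-column table the assertions expect:
#
#   @pytest.fixture
#   def sqlite_path(tmp_path_factory):
#       path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
#       with contextlib.closing(sqlite3.connect(path)) as con:
#           con.execute("CREATE TABLE dataset (col_1 text, col_2 integer, col_3 real)")
#           con.executemany(
#               "INSERT INTO dataset VALUES (?, ?, ?)", [(str(i), i, float(i)) for i in range(4)]
#           )
#           con.commit()
#       return path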
| 184 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep="\n")
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
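# Example invocation (a sketch; the saved-model path is a hypothetical placeholder):
#   python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict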
| 25 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    # Method and variable names are restored from usage; the obfuscation had
    # collapsed every method to a single name.
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
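# Usage sketch outside the test harness (assumes Hub access; the checkpoint
# name is only an example):
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")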
| 172 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # run one model quickly (not @slow) to catch simple problems early
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    # any extra models should go into this list - these can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
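# To run just these tests (the module path is an assumption; adjust to wherever
# this file lives in the repo):
#   pytest -k "run_eval" examples/legacy/seq2seq/test_seq2seq_examples.py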
| 25 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 292 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 25 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
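# For reference: for a Hermitian matrix M and nonzero vector x, the Rayleigh
# quotient R(M, x) = (x* M x) / (x* x) is always real and satisfies
# lambda_min <= R(M, x) <= lambda_max. For the second test above,
# x* M x = 42 and x* x = 14, hence the asserted value of 3.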
| 149 |
"""simple docstring"""
def binary_count_setbits(a: int) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""")
    elif isinstance(a, float):
        raise TypeError("""Input value must be a 'int' type""")
    return bin(a).count("""1""")
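# Example: bin(25) == "0b11001", so binary_count_setbits(25) == 3.
# On Python 3.10+ the built-in equivalent is (25).bit_count().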
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
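# Usage sketch (assumes network access to the Hub):
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   encoding = tokenizer("lower newer", return_token_type_ids=True)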
| 114 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any model that maps an image to a depth map.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="""bicubic""", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output)).astype("""uint8""")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
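# Usage sketch (assumes Hub access; the model name is only an example):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   outputs["depth"].save("depth.png")  # PIL image; outputs["predicted_depth"] is the raw tensor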
| 25 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
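# Example (a sketch): a batch of 2 sequences of length 8 over a 100-token vocab.
#   random_input_ids(2, 8, 100)  # -> tf.Tensor of shape (2, 8), dtype tf.int32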
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
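

# Illustrative usage sketch (added for clarity; not part of the original file).
# Assuming this module is wired up as transformers' TF benchmark, a minimal run
# could look like the following; the model name and sizes are hypothetical choices.
if __name__ == "__main__":
    from transformers import TensorFlowBenchmarkArguments

    benchmark_args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
    )
    benchmark = TensorFlowBenchmark(benchmark_args)
    results = benchmark.run()
    print(results)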
| 5 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
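

# Note (added): each pipeline stage above calls _start_torch_memory_measurement()
# first so that the subsequent torch.cuda.max_memory_allocated() reading reflects
# only that stage rather than the whole test run.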
| 25 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation of n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
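
    # Worked example (added): rotate(0x80000000, 1) == 1, because the high bit
    # wraps around to the least-significant position in a 32-bit left rotation.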
    def padding(self):
        # Pad with 0x80, zero bytes, then the 8-byte big-endian bit length
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        # Split the padded data into 64-byte (512-bit) blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]
    def expand_block(self, block):
        # Expand a 64-byte block into 80 32-bit words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
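

def demo_padding():
    # Illustrative check (added): the padded message is always a whole number of
    # 64-byte blocks, as required by the SHA-1 block structure.
    assert len(SHA1Hash(b"abc").padding()) % 64 == 0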
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 213 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 160 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
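

# Illustrative usage sketch (added; the model class and checkpoint path below are
# hypothetical placeholders, not part of the original module):
#
#     with init_empty_weights():
#         empty_model = MyModel()
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_config,
#         weights_location="path/to/checkpoint",
#         device_map="auto",
#     )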
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 25 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
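

# Worked example for the diagonal formulas above (added for illustration):
# queens at (row, col) = (0, 0) and (1, 1) collide on a 45° diagonal since
# 0 - 0 == 1 - 1, while (0, 3) and (1, 2) collide on a 135° diagonal since
# 0 + 3 == 1 + 2.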
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 147 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
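

def _demo_longest_common_substring():
    # Illustrative usage (added): "ab" is the first longest substring shared by
    # the two inputs below ("de" also has length 2 but is found later).
    assert longest_common_substring("abcdef", "xabded") == "ab"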
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort the array, then scan with two pointers for each fixed element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
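

def _demo_two_pointer():
    # Illustrative check (added): after sorting, [1, 2, 3, 4, 5] with target 9
    # fixes arr[i] = 1 and moves the pointers until 1 + 3 + 5 == 9.
    assert triplet_sum2([5, 4, 3, 2, 1], 9) == (1, 3, 5)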
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # Scale pixel coordinates to the 0-1000 range used by LayoutLM-style models
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
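

# Worked example (added for illustration): for a 200x100 image, the box
# [20, 10, 100, 50] normalizes to [100, 100, 500, 500] on the 0-1000 scale.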
def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 25 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
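
    # Worked example (added): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2),
    # inputs_to_logits_ratio is 5 * 2**6 = 320, i.e. one logit frame per 320 input
    # samples (20 ms of audio at 16 kHz).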
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and find A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
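    # Usage sketch: multiply (1 + 2x + 3x^2) by (4 + 5x + 6x^2). Coefficients are
    # given lowest degree first; the product is 4 + 13x + 28x^2 + 27x^3 + 18x^4.
    # Printed coefficients are complex numbers rounded to 8 decimal places.
    print(FFT([1, 2, 3], [4, 5, 6]))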
| 25 | 0 |
def largest_square_area_in_matrix_top_down_approach(rows, cols, mat):
    """
    Recursively explores every cell and tracks, via largest_square_area[0],
    the side of the largest square of 1's found so far.
    """

    def update_area_of_max_square(row, col) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows, cols, mat):
    """
    Same recursion as above, memoized with dp_array so each cell is solved once.
    """

    def update_area_of_max_square_using_dp_array(row, col, dp_array) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows, cols, mat):
    """
    Bottom-up tabulation: dp_array[row][col] is the side of the largest square
    of 1's whose top-left corner is at (row, col).
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat):
    """
    Same tabulation using only two rows of storage.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias) the finished row so that writes to current_row in
        # the next iteration don't corrupt the values read from next_row.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
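    # Cross-check on a rectangular grid (assumed example): the largest all-ones
    # square below is 2x2, so the call should print 2.
    print(largest_square_area_in_matrix_bottom_up(3, 4, [[1, 1, 0, 1], [1, 1, 1, 1], [0, 1, 1, 1]]))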
| 184 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
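# Invocation sketch (assumed; the script path and arguments are placeholders):
#
#   python xla_spawn.py --num_cores 8 your_training_script.py --your-args
#
# The launched script must expose an `_mp_fn(index)` entry point, which
# `xmp.spawn` calls once per TPU core.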
| 25 | 0 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 172 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """
    Returns 1 if both inputs are equal, and 0 otherwise.
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Returns the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
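    # Sanity check (assumed example): for n = 5 there are 4 * 4 = 16 products
    # a**b, and 2**4 == 4**2 is the only collision, leaving 15 distinct terms.
    assert solution(5) == 15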
| 292 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : str = False
| 25 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of a number,
    i.e. its bit length.

    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration class to store the configuration of a YOLOS model."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
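# Minimal usage sketch (assumed):
#   config = YolosConfig()
#   (config.model_type, config.hidden_size, config.num_detection_tokens)
#   -> ('yolos', 768, 100)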
| 25 | 0 |
from math import factorial
class Dual:
    """Dual numbers for forward-mode automatic differentiation."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur and cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter dual part with zeros: a missing component is zero.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Returns the value of the order-th derivative of func at position."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    # Second derivative of y**6 at y = 9: 30 * 9**4 = 196830
    print(differentiate(f, 9, 2))
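# Worked check (assumed usage): the first derivative of x**2 at x = 2 is 4,
# since f(x + E) = f(x) + f'(x)E for dual numbers.
assert differentiate(lambda x: x**2, 2, 1) == 4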
| 114 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 0 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    Wraps a DeepSpeed configuration (dict, JSON file path, or base64-encoded
    JSON string) and exposes quick queries such as the ZeRO stage.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """
    Internal wrapper for a DeepSpeed engine, used to follow a conventional training loop.
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """
    Internal wrapper around a DeepSpeed optimizer.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """
    Internal wrapper around a DeepSpeed scheduler.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """
    Dummy optimizer holding model parameters, used to follow a conventional
    training loop when the optimizer is specified in the DeepSpeed config file.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """
    Dummy scheduler, used to follow a conventional training loop when the
    scheduler is specified in the DeepSpeed config file.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
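# Minimal usage sketch (assumed):
#   ds = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds.is_zero3(), ds.is_offload()  # -> (True, True)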
| 5 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and non-negative"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class Test(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '''
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.'
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
                f'''device: {str(jax.devices()[0])}.'''
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 213 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number; by convention the sequence starts at 1.
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
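    # The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so (assumed check):
    assert ugly_numbers(10) == 12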
| 25 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['''qformer_''' + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
| 160 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'MIT/ast-finetuned-audioset-10-10-0.4593': (
        'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
    ),
}


class ASTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an Audio Spectrogram Transformer model."""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 25 | 0 |
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
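    # Sanity check (assumed example): 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
    assert solution(15) == 26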
| 147 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("""heads.cmd.mim_head.cls.predictions""", """mmm_image_head""")
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""", """mmm_text_head""")
        key = key.replace("""heads.cmd.itm_head.cls""", """itm_head""")
        key = key.replace("""heads.cmd.itm_head.pooler""", """itm_head.pooler""")
        key = key.replace("""heads.cmd.clip_head.logit_scale""", """flava.logit_scale""")
        key = key.replace("""heads.fairseq_mlm.cls.predictions""", """mlm_head""")
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""", """mim_head""")
        key = key.replace("""mm_text_projection""", """flava.text_to_mm_projection""")
        key = key.replace("""mm_image_projection""", """flava.image_to_mm_projection""")
        key = key.replace("""image_encoder.module""", """flava.image_model""")
        key = key.replace("""text_encoder.module""", """flava.text_model""")
        key = key.replace("""mm_encoder.module.encoder.cls_token""", """flava.multimodal_model.cls_token""")
        key = key.replace("""mm_encoder.module""", """flava.multimodal_model""")
        key = key.replace("""text_projection""", """flava.text_projection""")
        key = key.replace("""image_projection""", """flava.image_projection""")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
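# Illustrative, self-contained sketch (not part of the original script): the
# chained str.replace renaming used in upgrade_state_dict, demonstrated on a
# toy state dict. The keys below are invented for the demo.
def _demo_rename(state_dict):
    upgraded = {}
    for key, value in state_dict.items():
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        upgraded[key] = value
    return upgraded


assert _demo_rename({"image_encoder.module.ln.weight": 1}) == {"flava.image_model.ln.weight": 1}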
| 25 | 0 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**self.inputs_dict, **kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
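# Hedged, self-contained sketch of the to_json_string round-trip contract that
# ConfigTester exercises above. TinyConfig is a stand-in, not a real
# PretrainedConfig subclass.
import json as _demo_json


class TinyConfig:
    def __init__(self, hidden_size=32, num_attention_heads=4):
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads

    def to_json_string(self):
        return _demo_json.dumps(self.__dict__, indent=2)


assert _demo_json.loads(TinyConfig(hidden_size=64).to_json_string())["hidden_size"] == 64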
| 289 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
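# Hedged, self-contained illustration of the equivalence check performed above:
# compare two outputs by max absolute difference and torch.allclose. The
# tensors here are synthetic stand-ins for real model outputs.
import torch as _demo_torch

_ours = _demo_torch.randn(1, 11, 768)
_theirs = _ours + 1e-7 * _demo_torch.randn_like(_ours)
print(f"max_absolute_diff = {_demo_torch.max(_demo_torch.abs(_ours - _theirs)).item()}")
assert _demo_torch.allclose(_ours, _theirs, atol=1e-3)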
| 25 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
def a__ ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
def a__ ( self ) -> List[Any]:
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
_A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : int = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Any = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Optional[Any]:
_A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Tuple:
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
_A : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
_A : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
_A : str = prepare_semantic_single_inputs()
_A : Dict = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
_A : str = prepare_semantic_batch_inputs()
_A : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
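# Hedged sketch of the `do_reduce_labels` semantics checked above (per the
# option's documentation): background label 0 is mapped to the ignore index 255
# and every other label is shifted down by one. Pure-numpy stand-in, not the
# processor's actual implementation.
def _demo_reduce_labels(label_map):
    reduced = np.asarray(label_map, dtype=np.int64).copy()
    reduced[reduced == 0] = 255
    reduced = reduced - 1
    reduced[reduced == 254] = 255
    return reduced


assert (_demo_reduce_labels([0, 1, 150]) == np.array([255, 0, 149])).all()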
| 26 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
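# Hedged sketch (added for illustration): hand-tuned inference schedules like
# the lists above can be approximated by evenly spacing N timesteps over
# [0, 999]; the curated lists trade that uniformity for empirically better
# sample quality. This generator is an assumption-labeled stand-in, not how the
# lists above were produced.
def uniform_schedule(num_steps: int, num_train_timesteps: int = 1000) -> list:
    step = num_train_timesteps / num_steps
    return sorted({round(i * step) for i in range(num_steps)}, reverse=True)


print(uniform_schedule(27)[:3])  # e.g. [963, 926, 889]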
| 25 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
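# Hedged, self-contained sketch: the past_key_values shape constructed in
# generate_dummy_inputs above, computed with plain numbers for GPT-J-6B-like
# dimensions. The concrete values here are illustrative.
batch, seqlen = 2, 8
n_head, n_embd, n_layer = 16, 4096, 28
past_key_values_length = seqlen + 2  # deliberately not the input length
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
print(past_shape)  # (2, 16, 10, 256)
# one (key, value) pair of this shape per layer -> n_layer pairs in total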
| 184 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
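# Hedged toy illustration of the core check above: ops present in the graph
# but neither ONNX-supported nor internal are flagged. The sets below are
# invented for the demo.
model_ops = {"MatMul", "Relu", "SaveV2", "MyCustomOp"}
onnx_supported = {"MatMul", "Relu"}
internal = {"SaveV2"}
incompatible = sorted(op for op in model_ops if op not in onnx_supported and op not in internal)
print(incompatible)  # ['MyCustomOp']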
| 25 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
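# Hedged sketch (illustration only) of the lazy-import idea behind _LazyModule:
# PEP 562 lets a module defer the real import until first attribute access.
# Minimal stand-in, not transformers' actual implementation.
import importlib

_LAZY_MAP = {"sqrt": "math"}  # attribute -> providing module (demo mapping)


def _lazy_getattr(name):
    # import the providing module on demand and fetch the attribute from it
    if name in _LAZY_MAP:
        return getattr(importlib.import_module(_LAZY_MAP[name]), name)
    raise AttributeError(name)


assert _lazy_getattr("sqrt")(9.0) == 3.0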
| 172 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
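# Hedged, self-contained sketch of the patch.object(sys, "argv", ...) pattern
# the tests above rely on to drive an argparse-based entry point.
import argparse as _demo_argparse
from unittest.mock import patch as _demo_patch


def _demo_main():
    parser = _demo_argparse.ArgumentParser()
    parser.add_argument("--num_beams", type=int)
    return parser.parse_args().num_beams


with _demo_patch.object(sys, "argv", ["prog", "--num_beams", "2"]):
    assert _demo_main() == 2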
| 25 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
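# Hedged arithmetic sketch: the ViT-style token count implied by the config
# defaults above. YOLOS appends learnable detection tokens after the patch
# tokens and the [CLS] token.
image_size = [512, 864]
patch_size = 16
num_detection_tokens = 100
num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
seq_len = num_patches + 1 + num_detection_tokens
print(num_patches, seq_len)  # 1728 patch tokens, 1829 tokens in total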
| 292 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 0 |
def palindromic_string(input_string):
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this substring end after the previously explored end (that is r)?
        # if yes, update r to the last index of this substring
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
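# Example usage (added for illustration):
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"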
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 |
"""simple docstring"""
def binary_count_setbits(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 114 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
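# Hedged usage sketch (not part of this module): driving the pipeline through
# the top-level `pipeline` factory. "Intel/dpt-large" is one public
# depth-estimation checkpoint; running this downloads weights, so it is kept
# under a main guard.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(outputs["predicted_depth"].shape)  # raw depth tensor
    outputs["depth"].save("depth.png")  # 0-255 PIL visualization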
| 25 | 0 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 5 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16(self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        """simple docstring"""
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
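# --- Illustration (added): how the helper above brackets a peak-memory
# measurement. Plain PyTorch; requires a CUDA device.
if __name__ == "__main__" and torch.cuda.is_available():
    _start_torch_memory_measurement()
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x  # the work being measured
    print(f"peak allocation: {torch.cuda.max_memory_allocated() / 2**20:.1f} MiB")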
| 25 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
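# --- Illustration (added, not part of the test suite): the BPE split the test
# above pins down. "readapt" is missing from the toy vocab, so it decomposes
# into the merge pieces "re@@" + "adapt"; unknown symbols map to "<unk>".
if __name__ == "__main__":
    import tempfile

    tmpdir = tempfile.mkdtemp()
    vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
    merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps({"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}) + "\n")
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]))
    tokenizer = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
    print(tokenizer.tokenize("adapt react readapt apt"))
    # -> ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']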
| 213 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    """simple docstring"""

    def test_accelerated_optimizer_pickling(self):
        """simple docstring"""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
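# --- Illustration (added): the same prepare/pickle round trip outside a test
# class; a minimal sketch, CPU only.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    accelerator = Accelerator()
    optimizer = accelerator.prepare(torch.optim.SGD(model.parameters(), lr=0.1))
    restored = pickle.loads(pickle.dumps(optimizer))  # must round-trip without raising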
| 25 | 0 |
"""simple docstring"""
A = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 160 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
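# --- Illustration (added): a hedged sketch of driving load_and_quantize_model,
# following accelerate's documented flow. The checkpoint folder is a
# placeholder; any nn.Module skeleton built under init_empty_weights works.
if __name__ == "__main__":
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    with init_empty_weights():
        empty_model = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 1024))
    quantized_model = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="./checkpoint",  # placeholder: folder with the full-precision weights
        device_map="auto",
    )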
| 25 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 11_008,
'13B': 13_824,
'30B': 17_920,
'65B': 22_016,
'70B': 28_672,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
"""simple docstring"""
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase_: Tuple = os.path.join(_snake_case , """tmp""" )
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase_: Any = read_json(os.path.join(_snake_case , """params.json""" ) )
UpperCAmelCase_: Union[str, Any] = NUM_SHARDS[model_size]
UpperCAmelCase_: Optional[int] = params["""n_layers"""]
UpperCAmelCase_: str = params["""n_heads"""]
UpperCAmelCase_: List[str] = n_heads // num_shards
UpperCAmelCase_: List[str] = params["""dim"""]
UpperCAmelCase_: Optional[Any] = dim // n_heads
UpperCAmelCase_: str = 10000.0
UpperCAmelCase_: List[str] = 1.0 / (base ** (torch.arange(0 , _snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase_: List[str] = params["""n_kv_heads"""] # for GQA / MQA
UpperCAmelCase_: str = n_heads_per_shard // num_key_value_heads
UpperCAmelCase_: str = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase_: List[Any] = n_heads
UpperCAmelCase_: List[Any] = n_heads_per_shard
UpperCAmelCase_: List[Any] = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase_: List[Any] = torch.load(os.path.join(_snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCAmelCase_: Tuple = [
torch.load(os.path.join(_snake_case , F'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(_snake_case )
]
UpperCAmelCase_: List[Any] = 0
UpperCAmelCase_: List[Any] = {"""weight_map""": {}}
for layer_i in range(_snake_case ):
UpperCAmelCase_: Optional[int] = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
UpperCAmelCase_: Dict = {
F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wq.weight'] ),
F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wk.weight'] ),
F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase_: int = {
F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
F'layers.{layer_i}.attention_norm.weight'
].clone(),
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
F'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
UpperCAmelCase_: int = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) )
UpperCAmelCase_: List[Any] = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case , )
UpperCAmelCase_: Dict = torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case )
UpperCAmelCase_: Optional[Any] = torch.cat(
[loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase_: Optional[int] = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase_: Dict = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase_: Any = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase_: Optional[Any] = inv_freq
for k, v in state_dict.items():
UpperCAmelCase_: Optional[int] = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
UpperCAmelCase_: int = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
UpperCAmelCase_: Dict = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCAmelCase_: Optional[Any] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase_: Tuple = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
# Write configs
UpperCAmelCase_: str = {"""total_size""": param_count * 2}
write_json(_snake_case , os.path.join(_snake_case , """pytorch_model.bin.index.json""" ) )
UpperCAmelCase_: Optional[int] = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCAmelCase_: Tuple = params["""multiple_of"""] if """multiple_of""" in params else 2_5_6
UpperCAmelCase_: Union[str, Any] = LlamaConfig(
hidden_size=_snake_case , intermediate_size=compute_intermediate_size(_snake_case , _snake_case , _snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_snake_case , )
config.save_pretrained(_snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
UpperCAmelCase_: List[Any] = LlamaForCausalLM.from_pretrained(_snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=_snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_snake_case , safe_serialization=_snake_case )
shutil.rmtree(_snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders"
    )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"]
    )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer"
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
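# --- Illustration (added): a typical invocation of this script; paths are
# placeholders.
#   python convert_llama_weights_to_hf.py --input_dir /path/to/llama \
#       --model_size 7B --output_dir ./llama-7b-hf
# The converted folder then loads like any Hugging Face checkpoint:
#   model = LlamaForCausalLM.from_pretrained("./llama-7b-hf")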
| 147 |
"""simple docstring"""
def longest_common_substring(text1, text2):
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
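# --- Illustration (added): the DP runs in O(len(text1) * len(text2)) time and
# space; dp[i][j] holds the length of the common suffix of text1[:i] and text2[:j].
#   longest_common_substring("programming", "gaming")  -> "ming"
#   longest_common_substring("abc", "xyz")             -> ""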
| 25 | 0 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
def rename_key(key):
    """simple docstring"""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """simple docstring"""
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """simple docstring"""
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
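# --- Illustration (added): a hedged sketch of driving the converter above.
# The Flax model is a placeholder; any Flax module exposing `init_weights` in
# the style assumed by the function works.
#   import torch
#   pt_state_dict = torch.load("pytorch_model.bin")  # path illustrative
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
# The result is a nested dict of jnp arrays using Flax naming conventions
# ("kernel" instead of "weight", linear kernels transposed).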
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
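# --- Illustration (added): the standard image-processor call pattern; the
# image path is a placeholder and OCR requires the pytesseract backend.
if __name__ == "__main__":
    image = PIL.Image.open("invoice.png").convert("RGB")
    processor = LayoutLMv2ImageProcessor(apply_ocr=True)
    encoding = processor(image, return_tensors="pt")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224), channels flipped to BGR
    print(encoding["words"][0][:5], encoding["boxes"][0][:5])  # OCR words and 0-1000 boxes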
| 25 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
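# --- Illustration (added): a typical invocation; the checkpoint path is a
# placeholder.
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf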
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """simple docstring"""
    def __init__(self, poly_a=None, poly_b=None):
        """simple docstring"""
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        """simple docstring"""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
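# A minimal usage sketch (not part of the original file): multiplying
# (1 + 2x) by (3 + 4x) should give 3 + 10x + 8x^2, i.e. product
# coefficients [3, 10, 8] up to floating-point rounding.
#
#   fft = FFT(poly_a=[1, 2], poly_b=[3, 4])
#   print(fft)          # prints A, B and A*B
#   print(fft.product)  # approximately [(3+0j), (10+0j), (8+0j)]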
| 25 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 184 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
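# Illustrative launch (hypothetical training script and flags; only
# --num_cores belongs to this launcher, everything else is forwarded):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased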
| 25 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[Any]= logging.get_logger(__name__)
_a : Tuple= {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
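# A minimal usage sketch (assuming `transformers` is installed):
#
#   config = LukeConfig(entity_vocab_size=10000, use_entity_aware_attention=False)
#   print(config.hidden_size, config.entity_emb_size)  # 768 256, the defaults above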
| 172 |
"""simple docstring"""
def xnor_gate(input_1, input_2):
    # XNOR is 1 exactly when both inputs agree
    return 1 if input_1 == input_2 else 0


def test_xnor_gate():
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa, pb):
    # Midpoint of two points
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertex_1, vertex_2, vertex_3, depth):
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])

    if depth == 0:
        return

    # Recurse on the three corner triangles (Sierpinski construction)
    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
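# Illustrative note (not in the original script): each recursion level replaces
# a triangle with three smaller ones, so depth d draws sum(3**k for k in
# range(d + 1)) outlines in total.
#
#   assert sum(3**k for k in range(5)) == 121  # triangles drawn at depth 4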
| 292 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices,
    return that level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name():
    return __name__.split(".")[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name=None):
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity():
    """Return the current verbosity level of the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity):
    """Set the verbosity level of the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation():
    _get_library_root_logger().propagate = False


def enable_propagation():
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled():
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
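# A minimal usage sketch of the helpers above (mirroring the `datasets` API):
#
#   set_verbosity_info()
#   logger = get_logger(__name__)
#   logger.info("effective level: %s", get_verbosity())
#   disable_progress_bar()  # tqdm(...) calls above now return EmptyTqdm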
| 25 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 149 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
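# A minimal usage sketch (assuming `transformers` is installed):
#
#   config = YolosConfig()
#   onnx_config = YolosOnnxConfig(config)
#   print(onnx_config.inputs)              # pixel_values with batch/channel/height/width axes
#   print(onnx_config.default_onnx_opset)  # 12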
| 25 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
a : Union[str, Any] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
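    # Illustrative invocation (hypothetical paths; --config_path is optional):
    #
    #   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path /path/to/WavLM-Base.pt \
    #       --pytorch_dump_folder_path /path/to/output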
| 114 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
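# A minimal usage sketch (network access to the Hugging Face Hub assumed):
#
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   enc = tokenizer("Hello world")
#   # enc.input_ids starts with the [CLS] id and ends with the [SEP] id,
#   # matching build_inputs_with_special_tokens above.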
| 5 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
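# Illustrative check of the 6k +/- 1 optimization (not in the original file):
# every integer is 6k + r with r in {0,...,5}; r in {0, 2, 3, 4} makes the
# number divisible by 2 or 3, so primes above 3 must satisfy p % 6 in {1, 5}.
#
#   assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29])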
| 25 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
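# A minimal usage sketch (assuming `transformers` is installed):
#
#   config = RobertaConfig()
#   onnx_config = RobertaOnnxConfig(config, task="multiple-choice")
#   print(onnx_config.inputs)  # both inputs get the batch/choice/sequence axes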
| 213 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a number whose only prime factors are 2, 3 or 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5

    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
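# Illustrative sequence (not in the original file): the ugly numbers begin
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so ugly_numbers(10) == 12.
#
#   assert [ugly_numbers(k) for k in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]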
| 25 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight'''))
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"""))
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    # reorder a patch-merging reduction weight from mmseg's unfold layout
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    # same reordering, for the 1D patch-merging norm parameters
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
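# Round-trip sanity sketch (not in the original script; shapes are arbitrary).
# The [0, 2, 1, 3] permutation is its own inverse, so each reverse_* function
# must undo its correct_* counterpart exactly:
def _check_unfold_round_trip():
    w = torch.arange(32 * 64, dtype=torch.float32).reshape(32, 64)  # fake reduction weight
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)
    b = torch.arange(64, dtype=torch.float32)  # fake norm weight
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(b)), b)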
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
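# Typical invocation (the script filename is a guess; the flags come from the
# argparse setup above):
#   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub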
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(a__):  # base class: PretrainedConfig (imported at the top of this file)
    """Configuration for the Audio Spectrogram Transformer (AST)."""

    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10,
                 time_stride=10, max_length=1024, num_mel_bins=128, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
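# Back-of-the-envelope sketch (an assumption about how AST grids the
# spectrogram into patches, not taken from this file): with the defaults above,
#     f_dim = (num_mel_bins - patch_size) // frequency_stride + 1  ->  (128 - 16) // 10 + 1 = 12
#     t_dim = (max_length - patch_size) // time_stride + 1         ->  (1024 - 16) // 10 + 1 = 101
# giving 12 * 101 = 1212 spectrogram patches per clip.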
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` expressible as a difference of two
    consecutive cubes, i.e. (n + 1)**3 - n**3."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        # step to the next consecutive-cube difference
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
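# Sanity note (not in the original file): the loop above enumerates exactly
# the differences of consecutive cubes, since for every n >= 1
#     (n + 1)**3 - n**3 == 3*n*n + 3*n + 1
# and consecutive differences of that sequence are 6*(n + 1), which is what
# `prime_candidate += 6 * cube_index` adds. For example:
#     >>> [(n + 1) ** 3 - n ** 3 for n in range(1, 5)]
#     [7, 19, 37, 61]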
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
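# A table-driven variant of the same renaming (a hypothetical helper, not part
# of the original script): keeping the (old, new) substring rules as ordered
# data makes the precedence between overlapping prefixes explicit and auditable.
_RENAME_RULES = [
    ("heads.cmd.mim_head.cls.predictions", "mmm_image_head"),
    ("heads.cmd.mlm_head.cls.predictions", "mmm_text_head"),
    ("heads.cmd.itm_head.cls", "itm_head"),
]


def _apply_rename_rules(key, rules=_RENAME_RULES):
    for old, new in rules:
        key = key.replace(old, new)
    return key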
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
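def _entity_vocab_round_trip_demo():
    # Tiny round-trip sketch for load_entity_vocab (not part of the original
    # script; the file contents below are made up). Indices are line numbers.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as f:
        f.write("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\nAna Ivanovic\t123\n")
    vocab = load_entity_vocab(f.name)
    assert vocab["[MASK]"] == 2 and vocab["Ana Ivanovic"] == 3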
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
UpperCAmelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
        # intermediate (fairseq fc1 -> HF intermediate.dense)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output (fairseq fc2 -> HF output.dense)
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
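# Example invocation (the script filename is a guess; the flags come from the
# argparse setup above):
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path ./xlmr.xl --pytorch_dump_folder_path ./xlm-roberta-xl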
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(a__):  # base class: PretrainedConfig (imported at the top of this file)
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144,
                 num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5,
                 layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3,
                 attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1,
                 conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80,
                 input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False,
                 **kwargs) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
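# The five lists above read as hand-tuned denoising timestep schedules
# (descending from 999 to 0) of different lengths; the file itself gives no
# further context, so that reading is an inference. For comparison, an evenly
# spaced 27-step schedule over 1000 training steps:
#
#     >>> import numpy as np
#     >>> np.linspace(0, 999, 27, dtype=int)[::-1].tolist()[:5]
#     [999, 960, 922, 883, 845]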
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
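# To exercise the check locally one could fake the SageMaker environment
# before importing this module (the environment variable names are the real
# SageMaker ones; the values below are made up):
#
#     os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
#     os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
#
# With both set, the result depends only on whether `smdistributed` is installed.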
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _lowercase ( a__):
"""simple docstring"""
A__ = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
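# Example invocation (the script filename is a guess; the flags come from the
# argparse setup above):
#   python check_tf_ops.py --saved_model_path saved_model/model.pb --opset 12 --strict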
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Return True only if every custom (non-Python) file shipped with the package is present."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')
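# What `_re_copy_warning` extracts, on a made-up example line:
#
#     line = "    # Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
#     indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
#     # indent == "    "
#     # object_name == "models.attention.Attention"   (the "diffusers." prefix is consumed)
#     # replace_pattern == "with Attention->CrossAttention"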
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Run `black` on a code sample, wrapping indented bodies in a dummy class first."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check the code commented as copied in `filename` against the originals;
    return the diffs, rewriting the file when `overwrite=True`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}